1 /* SCTP kernel implementation
2  * (C) Copyright Red Hat Inc. 2017
3  *
4  * This file is part of the SCTP kernel implementation
5  *
6  * These functions implement SCTP stream message interleaving, mostly
7  * the processing of I-DATA and I-FORWARD-TSN chunks.
8  *
9  * This SCTP implementation is free software;
10  * you can redistribute it and/or modify it under the terms of
11  * the GNU General Public License as published by
12  * the Free Software Foundation; either version 2, or (at your option)
13  * any later version.
14  *
15  * This SCTP implementation is distributed in the hope that it
16  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
17  *                 ************************
18  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
19  * See the GNU General Public License for more details.
20  *
21  * You should have received a copy of the GNU General Public License
22  * along with GNU CC; see the file COPYING.  If not, see
23  * <http://www.gnu.org/licenses/>.
24  *
25  * Please send any bug reports or fixes you make to the
26  * email address(es):
27  *    lksctp developers <linux-sctp@vger.kernel.org>
28  *
29  * Written or modified by:
30  *    Xin Long <lucien.xin@gmail.com>
31  */
32
33 #include <net/busy_poll.h>
34 #include <net/sctp/sctp.h>
35 #include <net/sctp/sm.h>
36 #include <net/sctp/ulpevent.h>
37 #include <linux/sctp.h>
38
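/* Build an empty I-DATA fragment for a message: only the stream id (and
 * the UNORDERED flag) is set here; the PPID, FSN and MID are filled in
 * later by sctp_chunk_assign_mid().
 */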
39 static struct sctp_chunk *sctp_make_idatafrag_empty(
40                                         const struct sctp_association *asoc,
41                                         const struct sctp_sndrcvinfo *sinfo,
42                                         int len, __u8 flags, gfp_t gfp)
43 {
44         struct sctp_chunk *retval;
45         struct sctp_idatahdr dp;
46
47         memset(&dp, 0, sizeof(dp));
48         dp.stream = htons(sinfo->sinfo_stream);
49
50         if (sinfo->sinfo_flags & SCTP_UNORDERED)
51                 flags |= SCTP_DATA_UNORDERED;
52
53         retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
54         if (!retval)
55                 return NULL;
56
57         retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
58         memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
59
60         return retval;
61 }
62
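/* Assign I-DATA numbering to every fragment of an outgoing message: the
 * PPID goes into the first fragment, later fragments get consecutive FSNs,
 * and all fragments carry the stream's next (unordered) MID, which is only
 * advanced once the last fragment has been stamped.
 */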
63 static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
64 {
65         struct sctp_stream *stream;
66         struct sctp_chunk *lchunk;
67         __u32 cfsn = 0;
68         __u16 sid;
69
70         if (chunk->has_mid)
71                 return;
72
73         sid = sctp_chunk_stream_no(chunk);
74         stream = &chunk->asoc->stream;
75
76         list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
77                 struct sctp_idatahdr *hdr;
78                 __u32 mid;
79
80                 lchunk->has_mid = 1;
81
82                 hdr = lchunk->subh.idata_hdr;
83
84                 if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
85                         hdr->ppid = lchunk->sinfo.sinfo_ppid;
86                 else
87                         hdr->fsn = htonl(cfsn++);
88
89                 if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
90                         mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
91                                 sctp_mid_uo_next(stream, out, sid) :
92                                 sctp_mid_uo_peek(stream, out, sid);
93                 } else {
94                         mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
95                                 sctp_mid_next(stream, out, sid) :
96                                 sctp_mid_peek(stream, out, sid);
97                 }
98                 hdr->mid = htonl(mid);
99         }
100 }
101
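/* A DATA chunk is acceptable if it is unordered or its SSN has not already
 * been passed on the stream (e.g. skipped by a FORWARD-TSN); the I-DATA
 * variant below applies the same check using the MID.
 */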
102 static bool sctp_validate_data(struct sctp_chunk *chunk)
103 {
104         const struct sctp_stream *stream;
105         __u16 sid, ssn;
106
107         if (chunk->chunk_hdr->type != SCTP_CID_DATA)
108                 return false;
109
110         if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
111                 return true;
112
113         stream = &chunk->asoc->stream;
114         sid = sctp_chunk_stream_no(chunk);
115         ssn = ntohs(chunk->subh.data_hdr->ssn);
116
117         return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
118 }
119
120 static bool sctp_validate_idata(struct sctp_chunk *chunk)
121 {
122         struct sctp_stream *stream;
123         __u32 mid;
124         __u16 sid;
125
126         if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
127                 return false;
128
129         if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
130                 return true;
131
132         stream = &chunk->asoc->stream;
133         sid = sctp_chunk_stream_no(chunk);
134         mid = ntohl(chunk->subh.idata_hdr->mid);
135
136         return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
137 }
138
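/* Insert an event into the reassembly queue, which is kept sorted by
 * stream id, then MID, then fragment order (first fragment ahead of later
 * FSNs), so fragments of one message sit next to each other in order.
 */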
139 static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
140                                   struct sctp_ulpevent *event)
141 {
142         struct sctp_ulpevent *cevent;
143         struct sk_buff *pos, *loc;
144
145         pos = skb_peek_tail(&ulpq->reasm);
146         if (!pos) {
147                 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
148                 return;
149         }
150
151         cevent = sctp_skb2event(pos);
152
153         if (event->stream == cevent->stream &&
154             event->mid == cevent->mid &&
155             (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
156              (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
157               event->fsn > cevent->fsn))) {
158                 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
159                 return;
160         }
161
162         if ((event->stream == cevent->stream &&
163              MID_lt(cevent->mid, event->mid)) ||
164             event->stream > cevent->stream) {
165                 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
166                 return;
167         }
168
169         loc = NULL;
170         skb_queue_walk(&ulpq->reasm, pos) {
171                 cevent = sctp_skb2event(pos);
172
173                 if (event->stream < cevent->stream ||
174                     (event->stream == cevent->stream &&
175                      MID_lt(event->mid, cevent->mid))) {
176                         loc = pos;
177                         break;
178                 }
179                 if (event->stream == cevent->stream &&
180                     event->mid == cevent->mid &&
181                     !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
182                     (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
183                      event->fsn < cevent->fsn)) {
184                         loc = pos;
185                         break;
186                 }
187         }
188
189         if (!loc)
190                 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
191         else
192                 __skb_queue_before(&ulpq->reasm, loc, sctp_event2skb(event));
193 }
194
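/* Continue partial delivery on the event's stream: pull the next contiguous
 * run of middle/last fragments starting at sin->fsn for the message
 * currently being delivered (sin->mid) and hand it up as a single event.
 */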
195 static struct sctp_ulpevent *sctp_intl_retrieve_partial(
196                                                 struct sctp_ulpq *ulpq,
197                                                 struct sctp_ulpevent *event)
198 {
199         struct sk_buff *first_frag = NULL;
200         struct sk_buff *last_frag = NULL;
201         struct sctp_ulpevent *retval;
202         struct sctp_stream_in *sin;
203         struct sk_buff *pos;
204         __u32 next_fsn = 0;
205         int is_last = 0;
206
207         sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
208
209         skb_queue_walk(&ulpq->reasm, pos) {
210                 struct sctp_ulpevent *cevent = sctp_skb2event(pos);
211
212                 if (cevent->stream < event->stream)
213                         continue;
214
215                 if (cevent->stream > event->stream ||
216                     cevent->mid != sin->mid)
217                         break;
218
219                 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
220                 case SCTP_DATA_FIRST_FRAG:
221                         goto out;
222                 case SCTP_DATA_MIDDLE_FRAG:
223                         if (!first_frag) {
224                                 if (cevent->fsn == sin->fsn) {
225                                         first_frag = pos;
226                                         last_frag = pos;
227                                         next_fsn = cevent->fsn + 1;
228                                 }
229                         } else if (cevent->fsn == next_fsn) {
230                                 last_frag = pos;
231                                 next_fsn++;
232                         } else {
233                                 goto out;
234                         }
235                         break;
236                 case SCTP_DATA_LAST_FRAG:
237                         if (!first_frag) {
238                                 if (cevent->fsn == sin->fsn) {
239                                         first_frag = pos;
240                                         last_frag = pos;
241                                         next_fsn = 0;
242                                         is_last = 1;
243                                 }
244                         } else if (cevent->fsn == next_fsn) {
245                                 last_frag = pos;
246                                 next_fsn = 0;
247                                 is_last = 1;
248                         }
249                         goto out;
250                 default:
251                         goto out;
252                 }
253         }
254
255 out:
256         if (!first_frag)
257                 return NULL;
258
259         retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
260                                              &ulpq->reasm, first_frag,
261                                              last_frag);
262         if (retval) {
263                 sin->fsn = next_fsn;
264                 if (is_last) {
265                         retval->msg_flags |= MSG_EOR;
266                         sin->pd_mode = 0;
267                 }
268         }
269
270         return retval;
271 }
272
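/* Look for a complete first..last fragment run matching the event's MID
 * and reassemble it.  If no complete message is found but the fragments
 * queued for the stream's next expected MID reach the partial delivery
 * point, deliver those instead and enter partial delivery mode.
 */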
273 static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
274                                                 struct sctp_ulpq *ulpq,
275                                                 struct sctp_ulpevent *event)
276 {
277         struct sctp_association *asoc = ulpq->asoc;
278         struct sk_buff *pos, *first_frag = NULL;
279         struct sctp_ulpevent *retval = NULL;
280         struct sk_buff *pd_first = NULL;
281         struct sk_buff *pd_last = NULL;
282         struct sctp_stream_in *sin;
283         __u32 next_fsn = 0;
284         __u32 pd_point = 0;
285         __u32 pd_len = 0;
286         __u32 mid = 0;
287
288         sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
289
290         skb_queue_walk(&ulpq->reasm, pos) {
291                 struct sctp_ulpevent *cevent = sctp_skb2event(pos);
292
293                 if (cevent->stream < event->stream)
294                         continue;
295                 if (cevent->stream > event->stream)
296                         break;
297
298                 if (MID_lt(cevent->mid, event->mid))
299                         continue;
300                 if (MID_lt(event->mid, cevent->mid))
301                         break;
302
303                 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
304                 case SCTP_DATA_FIRST_FRAG:
305                         if (cevent->mid == sin->mid) {
306                                 pd_first = pos;
307                                 pd_last = pos;
308                                 pd_len = pos->len;
309                         }
310
311                         first_frag = pos;
312                         next_fsn = 0;
313                         mid = cevent->mid;
314                         break;
315
316                 case SCTP_DATA_MIDDLE_FRAG:
317                         if (first_frag && cevent->mid == mid &&
318                             cevent->fsn == next_fsn) {
319                                 next_fsn++;
320                                 if (pd_first) {
321                                         pd_last = pos;
322                                         pd_len += pos->len;
323                                 }
324                         } else {
325                                 first_frag = NULL;
326                         }
327                         break;
328
329                 case SCTP_DATA_LAST_FRAG:
330                         if (first_frag && cevent->mid == mid &&
331                             cevent->fsn == next_fsn)
332                                 goto found;
333                         else
334                                 first_frag = NULL;
335                         break;
336                 }
337         }
338
339         if (!pd_first)
340                 goto out;
341
342         pd_point = sctp_sk(asoc->base.sk)->pd_point;
343         if (pd_point && pd_point <= pd_len) {
344                 retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
345                                                      &ulpq->reasm,
346                                                      pd_first, pd_last);
347                 if (retval) {
348                         sin->fsn = next_fsn;
349                         sin->pd_mode = 1;
350                 }
351         }
352         goto out;
353
354 found:
355         retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
356                                              &ulpq->reasm,
357                                              first_frag, pos);
358         if (retval)
359                 retval->msg_flags |= MSG_EOR;
360
361 out:
362         return retval;
363 }
364
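/* Ordered reassembly entry point: unfragmented events are complete as is;
 * otherwise queue the fragment and first try to extend an ongoing partial
 * delivery, then try to reassemble a full message.
 */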
365 static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
366                                              struct sctp_ulpevent *event)
367 {
368         struct sctp_ulpevent *retval = NULL;
369         struct sctp_stream_in *sin;
370
371         if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
372                 event->msg_flags |= MSG_EOR;
373                 return event;
374         }
375
376         sctp_intl_store_reasm(ulpq, event);
377
378         sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
379         if (sin->pd_mode && event->mid == sin->mid &&
380             event->fsn == sin->fsn)
381                 retval = sctp_intl_retrieve_partial(ulpq, event);
382
383         if (!retval)
384                 retval = sctp_intl_retrieve_reassembled(ulpq, event);
385
386         return retval;
387 }
388
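/* Park an ordered event that arrived ahead of its turn in the lobby, which
 * is kept sorted by stream id and then MID.
 */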
389 static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
390                                     struct sctp_ulpevent *event)
391 {
392         struct sctp_ulpevent *cevent;
393         struct sk_buff *pos, *loc;
394
395         pos = skb_peek_tail(&ulpq->lobby);
396         if (!pos) {
397                 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
398                 return;
399         }
400
401         cevent = (struct sctp_ulpevent *)pos->cb;
402         if (event->stream == cevent->stream &&
403             MID_lt(cevent->mid, event->mid)) {
404                 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
405                 return;
406         }
407
408         if (event->stream > cevent->stream) {
409                 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
410                 return;
411         }
412
413         loc = NULL;
414         skb_queue_walk(&ulpq->lobby, pos) {
415                 cevent = (struct sctp_ulpevent *)pos->cb;
416
417                 if (cevent->stream > event->stream) {
418                         loc = pos;
419                         break;
420                 }
421                 if (cevent->stream == event->stream &&
422                     MID_lt(event->mid, cevent->mid)) {
423                         loc = pos;
424                         break;
425                 }
426         }
427
428         if (!loc)
429                 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
430         else
431                 __skb_queue_before(&ulpq->lobby, loc, sctp_event2skb(event));
432 }
433
434 static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
435                                        struct sctp_ulpevent *event)
436 {
437         struct sk_buff_head *event_list;
438         struct sctp_stream *stream;
439         struct sk_buff *pos, *tmp;
440         __u16 sid = event->stream;
441
442         stream  = &ulpq->asoc->stream;
443         event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;
444
445         sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
446                 struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;
447
448                 if (cevent->stream > sid)
449                         break;
450
451                 if (cevent->stream < sid)
452                         continue;
453
454                 if (cevent->mid != sctp_mid_peek(stream, in, sid))
455                         break;
456
457                 sctp_mid_next(stream, in, sid);
458
459                 __skb_unlink(pos, &ulpq->lobby);
460
461                 __skb_queue_tail(event_list, pos);
462         }
463 }
464
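/* Deliver an ordered event if its MID is the next one expected on its
 * stream, dragging along any lobby events that are now in order; otherwise
 * store it in the lobby and deliver nothing.
 */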
465 static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
466                                              struct sctp_ulpevent *event)
467 {
468         struct sctp_stream *stream;
469         __u16 sid;
470
471         stream  = &ulpq->asoc->stream;
472         sid = event->stream;
473
474         if (event->mid != sctp_mid_peek(stream, in, sid)) {
475                 sctp_intl_store_ordered(ulpq, event);
476                 return NULL;
477         }
478
479         sctp_mid_next(stream, in, sid);
480
481         sctp_intl_retrieve_ordered(ulpq, event);
482
483         return event;
484 }
485
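/* Move an event (and any events chained on its skb list) to the socket
 * receive queue, unless the socket has shut down receiving or the event
 * type is not subscribed, and wake up the reader if needed.  Returns 1 if
 * queued, 0 if freed.
 */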
486 static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
487                               struct sctp_ulpevent *event)
488 {
489         struct sk_buff *skb = sctp_event2skb(event);
490         struct sock *sk = ulpq->asoc->base.sk;
491         struct sctp_sock *sp = sctp_sk(sk);
492         struct sk_buff_head *skb_list;
493
494         skb_list = (struct sk_buff_head *)skb->prev;
495
496         if (sk->sk_shutdown & RCV_SHUTDOWN &&
497             (sk->sk_shutdown & SEND_SHUTDOWN ||
498              !sctp_ulpevent_is_notification(event)))
499                 goto out_free;
500
501         if (!sctp_ulpevent_is_notification(event)) {
502                 sk_mark_napi_id(sk, skb);
503                 sk_incoming_cpu_update(sk);
504         }
505
506         if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
507                 goto out_free;
508
509         if (skb_list)
510                 skb_queue_splice_tail_init(skb_list,
511                                            &sk->sk_receive_queue);
512         else
513                 __skb_queue_tail(&sk->sk_receive_queue, skb);
514
515         if (!sp->data_ready_signalled) {
516                 sp->data_ready_signalled = 1;
517                 sk->sk_data_ready(sk);
518         }
519
520         return 1;
521
522 out_free:
523         if (skb_list)
524                 sctp_queue_purge_ulpevents(skb_list);
525         else
526                 sctp_ulpevent_free(event);
527
528         return 0;
529 }
530
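/* The *_uo helpers below mirror the ordered reassembly code above, but
 * operate on the unordered reassembly queue and the per-stream
 * mid_uo/fsn_uo/pd_mode_uo state.
 */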
531 static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
532                                      struct sctp_ulpevent *event)
533 {
534         struct sctp_ulpevent *cevent;
535         struct sk_buff *pos;
536
537         pos = skb_peek_tail(&ulpq->reasm_uo);
538         if (!pos) {
539                 __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
540                 return;
541         }
542
543         cevent = sctp_skb2event(pos);
544
545         if (event->stream == cevent->stream &&
546             event->mid == cevent->mid &&
547             (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
548              (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
549               event->fsn > cevent->fsn))) {
550                 __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
551                 return;
552         }
553
554         if ((event->stream == cevent->stream &&
555              MID_lt(cevent->mid, event->mid)) ||
556             event->stream > cevent->stream) {
557                 __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
558                 return;
559         }
560
561         skb_queue_walk(&ulpq->reasm_uo, pos) {
562                 cevent = sctp_skb2event(pos);
563
564                 if (event->stream < cevent->stream ||
565                     (event->stream == cevent->stream &&
566                      MID_lt(event->mid, cevent->mid)))
567                         break;
568
569                 if (event->stream == cevent->stream &&
570                     event->mid == cevent->mid &&
571                     !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
572                     (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
573                      event->fsn < cevent->fsn))
574                         break;
575         }
576
577         __skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
578 }
579
580 static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
581                                                 struct sctp_ulpq *ulpq,
582                                                 struct sctp_ulpevent *event)
583 {
584         struct sk_buff *first_frag = NULL;
585         struct sk_buff *last_frag = NULL;
586         struct sctp_ulpevent *retval;
587         struct sctp_stream_in *sin;
588         struct sk_buff *pos;
589         __u32 next_fsn = 0;
590         int is_last = 0;
591
592         sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
593
594         skb_queue_walk(&ulpq->reasm_uo, pos) {
595                 struct sctp_ulpevent *cevent = sctp_skb2event(pos);
596
597                 if (cevent->stream < event->stream)
598                         continue;
599                 if (cevent->stream > event->stream)
600                         break;
601
602                 if (MID_lt(cevent->mid, sin->mid_uo))
603                         continue;
604                 if (MID_lt(sin->mid_uo, cevent->mid))
605                         break;
606
607                 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
608                 case SCTP_DATA_FIRST_FRAG:
609                         goto out;
610                 case SCTP_DATA_MIDDLE_FRAG:
611                         if (!first_frag) {
612                                 if (cevent->fsn == sin->fsn_uo) {
613                                         first_frag = pos;
614                                         last_frag = pos;
615                                         next_fsn = cevent->fsn + 1;
616                                 }
617                         } else if (cevent->fsn == next_fsn) {
618                                 last_frag = pos;
619                                 next_fsn++;
620                         } else {
621                                 goto out;
622                         }
623                         break;
624                 case SCTP_DATA_LAST_FRAG:
625                         if (!first_frag) {
626                                 if (cevent->fsn == sin->fsn_uo) {
627                                         first_frag = pos;
628                                         last_frag = pos;
629                                         next_fsn = 0;
630                                         is_last = 1;
631                                 }
632                         } else if (cevent->fsn == next_fsn) {
633                                 last_frag = pos;
634                                 next_fsn = 0;
635                                 is_last = 1;
636                         }
637                         goto out;
638                 default:
639                         goto out;
640                 }
641         }
642
643 out:
644         if (!first_frag)
645                 return NULL;
646
647         retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
648                                              &ulpq->reasm_uo, first_frag,
649                                              last_frag);
650         if (retval) {
651                 sin->fsn_uo = next_fsn;
652                 if (is_last) {
653                         retval->msg_flags |= MSG_EOR;
654                         sin->pd_mode_uo = 0;
655                 }
656         }
657
658         return retval;
659 }
660
661 static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
662                                                 struct sctp_ulpq *ulpq,
663                                                 struct sctp_ulpevent *event)
664 {
665         struct sctp_association *asoc = ulpq->asoc;
666         struct sk_buff *pos, *first_frag = NULL;
667         struct sctp_ulpevent *retval = NULL;
668         struct sk_buff *pd_first = NULL;
669         struct sk_buff *pd_last = NULL;
670         struct sctp_stream_in *sin;
671         __u32 next_fsn = 0;
672         __u32 pd_point = 0;
673         __u32 pd_len = 0;
674         __u32 mid = 0;
675
676         sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
677
678         skb_queue_walk(&ulpq->reasm_uo, pos) {
679                 struct sctp_ulpevent *cevent = sctp_skb2event(pos);
680
681                 if (cevent->stream < event->stream)
682                         continue;
683                 if (cevent->stream > event->stream)
684                         break;
685
686                 if (MID_lt(cevent->mid, event->mid))
687                         continue;
688                 if (MID_lt(event->mid, cevent->mid))
689                         break;
690
691                 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
692                 case SCTP_DATA_FIRST_FRAG:
693                         if (!sin->pd_mode_uo) {
694                                 sin->mid_uo = cevent->mid;
695                                 pd_first = pos;
696                                 pd_last = pos;
697                                 pd_len = pos->len;
698                         }
699
700                         first_frag = pos;
701                         next_fsn = 0;
702                         mid = cevent->mid;
703                         break;
704
705                 case SCTP_DATA_MIDDLE_FRAG:
706                         if (first_frag && cevent->mid == mid &&
707                             cevent->fsn == next_fsn) {
708                                 next_fsn++;
709                                 if (pd_first) {
710                                         pd_last = pos;
711                                         pd_len += pos->len;
712                                 }
713                         } else {
714                                 first_frag = NULL;
715                         }
716                         break;
717
718                 case SCTP_DATA_LAST_FRAG:
719                         if (first_frag && cevent->mid == mid &&
720                             cevent->fsn == next_fsn)
721                                 goto found;
722                         else
723                                 first_frag = NULL;
724                         break;
725                 }
726         }
727
728         if (!pd_first)
729                 goto out;
730
731         pd_point = sctp_sk(asoc->base.sk)->pd_point;
732         if (pd_point && pd_point <= pd_len) {
733                 retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
734                                                      &ulpq->reasm_uo,
735                                                      pd_first, pd_last);
736                 if (retval) {
737                         sin->fsn_uo = next_fsn;
738                         sin->pd_mode_uo = 1;
739                 }
740         }
741         goto out;
742
743 found:
744         retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
745                                              &ulpq->reasm_uo,
746                                              first_frag, pos);
747         if (retval)
748                 retval->msg_flags |= MSG_EOR;
749
750 out:
751         return retval;
752 }
753
754 static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
755                                                 struct sctp_ulpevent *event)
756 {
757         struct sctp_ulpevent *retval = NULL;
758         struct sctp_stream_in *sin;
759
760         if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
761                 event->msg_flags |= MSG_EOR;
762                 return event;
763         }
764
765         sctp_intl_store_reasm_uo(ulpq, event);
766
767         sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
768         if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
769             event->fsn == sin->fsn_uo)
770                 retval = sctp_intl_retrieve_partial_uo(ulpq, event);
771
772         if (!retval)
773                 retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);
774
775         return retval;
776 }
777
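/* For forced partial delivery: find a stream that is not already in
 * unordered PD mode and has a leading run of fragments on the unordered
 * queue, reassemble the contiguous part and switch that stream into
 * unordered PD mode.
 */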
778 static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
779 {
780         struct sctp_stream_in *csin, *sin = NULL;
781         struct sk_buff *first_frag = NULL;
782         struct sk_buff *last_frag = NULL;
783         struct sctp_ulpevent *retval;
784         struct sk_buff *pos;
785         __u32 next_fsn = 0;
786         __u16 sid = 0;
787
788         skb_queue_walk(&ulpq->reasm_uo, pos) {
789                 struct sctp_ulpevent *cevent = sctp_skb2event(pos);
790
791                 csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
792                 if (csin->pd_mode_uo)
793                         continue;
794
795                 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
796                 case SCTP_DATA_FIRST_FRAG:
797                         if (first_frag)
798                                 goto out;
799                         first_frag = pos;
800                         last_frag = pos;
801                         next_fsn = 0;
802                         sin = csin;
803                         sid = cevent->stream;
804                         sin->mid_uo = cevent->mid;
805                         break;
806                 case SCTP_DATA_MIDDLE_FRAG:
807                         if (!first_frag)
808                                 break;
809                         if (cevent->stream == sid &&
810                             cevent->mid == sin->mid_uo &&
811                             cevent->fsn == next_fsn) {
812                                 next_fsn++;
813                                 last_frag = pos;
814                         } else {
815                                 goto out;
816                         }
817                         break;
818                 case SCTP_DATA_LAST_FRAG:
819                         if (first_frag)
820                                 goto out;
821                         break;
822                 default:
823                         break;
824                 }
825         }
826
827         if (!first_frag)
828                 return NULL;
829
830 out:
831         retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
832                                              &ulpq->reasm_uo, first_frag,
833                                              last_frag);
834         if (retval) {
835                 sin->fsn_uo = next_fsn;
836                 sin->pd_mode_uo = 1;
837         }
838
839         return retval;
840 }
841
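/* Main I-DATA receive path: turn the chunk into an ulpevent, run it
 * through (un)ordered reassembly and, for ordered data, through MID
 * ordering, then enqueue whatever became deliverable.  Returns 1 when a
 * complete message was delivered, 0 otherwise, -ENOMEM on allocation
 * failure.
 */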
842 static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
843                                struct sctp_chunk *chunk, gfp_t gfp)
844 {
845         struct sctp_ulpevent *event;
846         struct sk_buff_head temp;
847         int event_eor = 0;
848
849         event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
850         if (!event)
851                 return -ENOMEM;
852
853         event->mid = ntohl(chunk->subh.idata_hdr->mid);
854         if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
855                 event->ppid = chunk->subh.idata_hdr->ppid;
856         else
857                 event->fsn = ntohl(chunk->subh.idata_hdr->fsn);
858
859         if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
860                 event = sctp_intl_reasm(ulpq, event);
861                 if (event && event->msg_flags & MSG_EOR) {
862                         skb_queue_head_init(&temp);
863                         __skb_queue_tail(&temp, sctp_event2skb(event));
864
865                         event = sctp_intl_order(ulpq, event);
866                 }
867         } else {
868                 event = sctp_intl_reasm_uo(ulpq, event);
869         }
870
871         if (event) {
872                 event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
873                 sctp_enqueue_event(ulpq, event);
874         }
875
876         return event_eor;
877 }
878
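/* Ordered counterpart of sctp_intl_retrieve_first_uo(): the leading run
 * must start with the first fragment of the stream's next expected MID.
 */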
879 static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
880 {
881         struct sctp_stream_in *csin, *sin = NULL;
882         struct sk_buff *first_frag = NULL;
883         struct sk_buff *last_frag = NULL;
884         struct sctp_ulpevent *retval;
885         struct sk_buff *pos;
886         __u32 next_fsn = 0;
887         __u16 sid = 0;
888
889         skb_queue_walk(&ulpq->reasm, pos) {
890                 struct sctp_ulpevent *cevent = sctp_skb2event(pos);
891
892                 csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
893                 if (csin->pd_mode)
894                         continue;
895
896                 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
897                 case SCTP_DATA_FIRST_FRAG:
898                         if (first_frag)
899                                 goto out;
900                         if (cevent->mid == csin->mid) {
901                                 first_frag = pos;
902                                 last_frag = pos;
903                                 next_fsn = 0;
904                                 sin = csin;
905                                 sid = cevent->stream;
906                         }
907                         break;
908                 case SCTP_DATA_MIDDLE_FRAG:
909                         if (!first_frag)
910                                 break;
911                         if (cevent->stream == sid &&
912                             cevent->mid == sin->mid &&
913                             cevent->fsn == next_fsn) {
914                                 next_fsn++;
915                                 last_frag = pos;
916                         } else {
917                                 goto out;
918                         }
919                         break;
920                 case SCTP_DATA_LAST_FRAG:
921                         if (first_frag)
922                                 goto out;
923                         break;
924                 default:
925                         break;
926                 }
927         }
928
929         if (!first_frag)
930                 return NULL;
931
932 out:
933         retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
934                                              &ulpq->reasm, first_frag,
935                                              last_frag);
936         if (retval) {
937                 sin->fsn = next_fsn;
938                 sin->pd_mode = 1;
939         }
940
941         return retval;
942 }
943
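/* Force partial delivery: keep pulling leading fragment runs off the
 * ordered and unordered reassembly queues and pushing them to the socket
 * until nothing more can be extracted.
 */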
944 static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
945 {
946         struct sctp_ulpevent *event;
947
948         if (!skb_queue_empty(&ulpq->reasm)) {
949                 do {
950                         event = sctp_intl_retrieve_first(ulpq);
951                         if (event)
952                                 sctp_enqueue_event(ulpq, event);
953                 } while (event);
954         }
955
956         if (!skb_queue_empty(&ulpq->reasm_uo)) {
957                 do {
958                         event = sctp_intl_retrieve_first_uo(ulpq);
959                         if (event)
960                                 sctp_enqueue_event(ulpq, event);
961                 } while (event);
962         }
963 }
964
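/* Try to make room for a newly arrived I-DATA chunk by renegeing queued
 * events from the lobby and reassembly queues; if enough memory was freed
 * but the chunk still did not complete a message, start partial delivery.
 */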
965 static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
966                                gfp_t gfp)
967 {
968         struct sctp_association *asoc = ulpq->asoc;
969         __u32 freed = 0;
970         __u16 needed;
971
972         needed = ntohs(chunk->chunk_hdr->length) -
973                  sizeof(struct sctp_idata_chunk);
974
975         if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
976                 freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
977                 if (freed < needed)
978                         freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
979                                                        needed);
980                 if (freed < needed)
981                         freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
982                                                        needed);
983         }
984
985         if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
986                 sctp_intl_start_pd(ulpq, gfp);
987
988         sk_mem_reclaim(asoc->base.sk);
989 }
990
991 static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
992                                       __u32 mid, __u16 flags, gfp_t gfp)
993 {
994         struct sock *sk = ulpq->asoc->base.sk;
995         struct sctp_ulpevent *ev = NULL;
996
997         if (!sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
998                                         SCTP_PARTIAL_DELIVERY_EVENT))
999                 return;
1000
1001         ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
1002                                       sid, mid, flags, gfp);
1003         if (ev) {
1004                 struct sctp_sock *sp = sctp_sk(sk);
1005
1006                 __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
1007
1008                 if (!sp->data_ready_signalled) {
1009                         sp->data_ready_signalled = 1;
1010                         sk->sk_data_ready(sk);
1011                 }
1012         }
1013 }
1014
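/* After a MID has been skipped on a stream, gather the lobby events for
 * that stream whose MID has already been passed (plus the next expected
 * one, if queued), pull in any in-order followers and push the whole batch
 * to the receive queue.
 */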
1015 static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
1016 {
1017         struct sctp_stream *stream = &ulpq->asoc->stream;
1018         struct sctp_ulpevent *cevent, *event = NULL;
1019         struct sk_buff_head *lobby = &ulpq->lobby;
1020         struct sk_buff *pos, *tmp;
1021         struct sk_buff_head temp;
1022         __u16 csid;
1023         __u32 cmid;
1024
1025         skb_queue_head_init(&temp);
1026         sctp_skb_for_each(pos, lobby, tmp) {
1027                 cevent = (struct sctp_ulpevent *)pos->cb;
1028                 csid = cevent->stream;
1029                 cmid = cevent->mid;
1030
1031                 if (csid > sid)
1032                         break;
1033
1034                 if (csid < sid)
1035                         continue;
1036
1037                 if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
1038                         break;
1039
1040                 __skb_unlink(pos, lobby);
1041                 if (!event)
1042                         event = sctp_skb2event(pos);
1043
1044                 __skb_queue_tail(&temp, pos);
1045         }
1046
1047         if (!event && pos != (struct sk_buff *)lobby) {
1048                 cevent = (struct sctp_ulpevent *)pos->cb;
1049                 csid = cevent->stream;
1050                 cmid = cevent->mid;
1051
1052                 if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
1053                         sctp_mid_next(stream, in, csid);
1054                         __skb_unlink(pos, lobby);
1055                         __skb_queue_tail(&temp, pos);
1056                         event = sctp_skb2event(pos);
1057                 }
1058         }
1059
1060         if (event) {
1061                 sctp_intl_retrieve_ordered(ulpq, event);
1062                 sctp_enqueue_event(ulpq, event);
1063         }
1064 }
1065
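/* Abort partial delivery on every stream, report it to the user where
 * subscribed, and flush all pending queues; used when all outstanding data
 * has to be dropped.
 */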
1066 static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1067 {
1068         struct sctp_stream *stream = &ulpq->asoc->stream;
1069         __u16 sid;
1070
1071         for (sid = 0; sid < stream->incnt; sid++) {
1072                 struct sctp_stream_in *sin = SCTP_SI(stream, sid);
1073                 __u32 mid;
1074
1075                 if (sin->pd_mode_uo) {
1076                         sin->pd_mode_uo = 0;
1077
1078                         mid = sin->mid_uo;
1079                         sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
1080                 }
1081
1082                 if (sin->pd_mode) {
1083                         sin->pd_mode = 0;
1084
1085                         mid = sin->mid;
1086                         sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
1087                         sctp_mid_skip(stream, in, sid, mid);
1088
1089                         sctp_intl_reap_ordered(ulpq, sid);
1090                 }
1091         }
1092
1093         /* intl abort pd happens only when all data needs to be cleaned */
1094         sctp_ulpq_flush(ulpq);
1095 }
1096
1097 static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
1098                                     int nskips, __be16 stream, __u8 flags)
1099 {
1100         int i;
1101
1102         for (i = 0; i < nskips; i++)
1103                 if (skiplist[i].stream == stream &&
1104                     skiplist[i].flags == flags)
1105                         return i;
1106
1107         return i;
1108 }
1109
1110 #define SCTP_FTSN_U_BIT 0x1
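/* Build an I-FORWARD-TSN chunk for abandoned chunks: advance the advanced
 * peer ack point past consecutively abandoned TSNs, record up to ten
 * (stream, flags, MID) skip entries, and queue the chunk on the control
 * chunk list.
 */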
1111 static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
1112 {
1113         struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
1114         struct sctp_association *asoc = q->asoc;
1115         struct sctp_chunk *ftsn_chunk = NULL;
1116         struct list_head *lchunk, *temp;
1117         int nskips = 0, skip_pos;
1118         struct sctp_chunk *chunk;
1119         __u32 tsn;
1120
1121         if (!asoc->peer.prsctp_capable)
1122                 return;
1123
1124         if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
1125                 asoc->adv_peer_ack_point = ctsn;
1126
1127         list_for_each_safe(lchunk, temp, &q->abandoned) {
1128                 chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
1129                 tsn = ntohl(chunk->subh.data_hdr->tsn);
1130
1131                 if (TSN_lte(tsn, ctsn)) {
1132                         list_del_init(lchunk);
1133                         sctp_chunk_free(chunk);
1134                 } else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
1135                         __be16 sid = chunk->subh.idata_hdr->stream;
1136                         __be32 mid = chunk->subh.idata_hdr->mid;
1137                         __u8 flags = 0;
1138
1139                         if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
1140                                 flags |= SCTP_FTSN_U_BIT;
1141
1142                         asoc->adv_peer_ack_point = tsn;
1143                         skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
1144                                                      sid, flags);
1145                         ftsn_skip_arr[skip_pos].stream = sid;
1146                         ftsn_skip_arr[skip_pos].reserved = 0;
1147                         ftsn_skip_arr[skip_pos].flags = flags;
1148                         ftsn_skip_arr[skip_pos].mid = mid;
1149                         if (skip_pos == nskips)
1150                                 nskips++;
1151                         if (nskips == 10)
1152                                 break;
1153                 } else {
1154                         break;
1155                 }
1156         }
1157
1158         if (asoc->adv_peer_ack_point > ctsn)
1159                 ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
1160                                                nskips, &ftsn_skip_arr[0]);
1161
1162         if (ftsn_chunk) {
1163                 list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
1164                 SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
1165         }
1166 }
1167
1168 #define _sctp_walk_ifwdtsn(pos, chunk, end) \
1169         for (pos = chunk->subh.ifwdtsn_hdr->skip; \
1170              (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)
1171
1172 #define sctp_walk_ifwdtsn(pos, ch) \
1173         _sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
1174                                         sizeof(struct sctp_ifwdtsn_chunk))
1175
1176 static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
1177 {
1178         struct sctp_fwdtsn_skip *skip;
1179         __u16 incnt;
1180
1181         if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
1182                 return false;
1183
1184         incnt = chunk->asoc->stream.incnt;
1185         sctp_walk_fwdtsn(skip, chunk)
1186                 if (ntohs(skip->stream) >= incnt)
1187                         return false;
1188
1189         return true;
1190 }
1191
1192 static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
1193 {
1194         struct sctp_ifwdtsn_skip *skip;
1195         __u16 incnt;
1196
1197         if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
1198                 return false;
1199
1200         incnt = chunk->asoc->stream.incnt;
1201         sctp_walk_ifwdtsn(skip, chunk)
1202                 if (ntohs(skip->stream) >= incnt)
1203                         return false;
1204
1205         return true;
1206 }
1207
1208 static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
1209 {
1210         /* Move the Cumulative TSN Ack ahead. */
1211         sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
1212         /* purge the fragmentation queue */
1213         sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
1214         /* Abort any in progress partial delivery. */
1215         sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
1216 }
1217
1218 static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
1219 {
1220         struct sk_buff *pos, *tmp;
1221
1222         skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
1223                 struct sctp_ulpevent *event = sctp_skb2event(pos);
1224                 __u32 tsn = event->tsn;
1225
1226                 if (TSN_lte(tsn, ftsn)) {
1227                         __skb_unlink(pos, &ulpq->reasm);
1228                         sctp_ulpevent_free(event);
1229                 }
1230         }
1231
1232         skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
1233                 struct sctp_ulpevent *event = sctp_skb2event(pos);
1234                 __u32 tsn = event->tsn;
1235
1236                 if (TSN_lte(tsn, ftsn)) {
1237                         __skb_unlink(pos, &ulpq->reasm_uo);
1238                         sctp_ulpevent_free(event);
1239                 }
1240         }
1241 }
1242
1243 static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
1244 {
1245         /* Move the Cumulative TSN Ack ahead. */
1246         sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
1247         /* purge the fragmentation queue */
1248         sctp_intl_reasm_flushtsn(ulpq, ftsn);
1249         /* abort only when it's for all data */
1250         if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
1251                 sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
1252 }
1253
1254 static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
1255 {
1256         struct sctp_fwdtsn_skip *skip;
1257
1258         /* Walk through all the skipped SSNs */
1259         sctp_walk_fwdtsn(skip, chunk)
1260                 sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
1261 }
1262
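/* Apply one I-FORWARD-TSN skip entry: abort any partial delivery that the
 * skip overtakes and, for ordered data, advance the stream's expected MID
 * and flush any lobby events that are now deliverable.
 */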
1263 static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
1264                            __u8 flags)
1265 {
1266         struct sctp_stream_in *sin = sctp_stream_in(&ulpq->asoc->stream, sid);
1267         struct sctp_stream *stream  = &ulpq->asoc->stream;
1268
1269         if (flags & SCTP_FTSN_U_BIT) {
1270                 if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
1271                         sin->pd_mode_uo = 0;
1272                         sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
1273                                                   GFP_ATOMIC);
1274                 }
1275                 return;
1276         }
1277
1278         if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
1279                 return;
1280
1281         if (sin->pd_mode) {
1282                 sin->pd_mode = 0;
1283                 sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
1284         }
1285
1286         sctp_mid_skip(stream, in, sid, mid);
1287
1288         sctp_intl_reap_ordered(ulpq, sid);
1289 }
1290
1291 static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
1292 {
1293         struct sctp_ifwdtsn_skip *skip;
1294
1295         /* Walk through all the skipped MIDs and abort stream pd if possible */
1296         sctp_walk_ifwdtsn(skip, chunk)
1297                 sctp_intl_skip(ulpq, ntohs(skip->stream),
1298                                ntohl(skip->mid), skip->flags);
1299 }
1300
1301 static struct sctp_stream_interleave sctp_stream_interleave_0 = {
1302         .data_chunk_len         = sizeof(struct sctp_data_chunk),
1303         .ftsn_chunk_len         = sizeof(struct sctp_fwdtsn_chunk),
1304         /* DATA process functions */
1305         .make_datafrag          = sctp_make_datafrag_empty,
1306         .assign_number          = sctp_chunk_assign_ssn,
1307         .validate_data          = sctp_validate_data,
1308         .ulpevent_data          = sctp_ulpq_tail_data,
1309         .enqueue_event          = sctp_ulpq_tail_event,
1310         .renege_events          = sctp_ulpq_renege,
1311         .start_pd               = sctp_ulpq_partial_delivery,
1312         .abort_pd               = sctp_ulpq_abort_pd,
1313         /* FORWARD-TSN process functions */
1314         .generate_ftsn          = sctp_generate_fwdtsn,
1315         .validate_ftsn          = sctp_validate_fwdtsn,
1316         .report_ftsn            = sctp_report_fwdtsn,
1317         .handle_ftsn            = sctp_handle_fwdtsn,
1318 };
1319
1320 static struct sctp_stream_interleave sctp_stream_interleave_1 = {
1321         .data_chunk_len         = sizeof(struct sctp_idata_chunk),
1322         .ftsn_chunk_len         = sizeof(struct sctp_ifwdtsn_chunk),
1323         /* I-DATA process functions */
1324         .make_datafrag          = sctp_make_idatafrag_empty,
1325         .assign_number          = sctp_chunk_assign_mid,
1326         .validate_data          = sctp_validate_idata,
1327         .ulpevent_data          = sctp_ulpevent_idata,
1328         .enqueue_event          = sctp_enqueue_event,
1329         .renege_events          = sctp_renege_events,
1330         .start_pd               = sctp_intl_start_pd,
1331         .abort_pd               = sctp_intl_abort_pd,
1332         /* I-FORWARD-TSN process functions */
1333         .generate_ftsn          = sctp_generate_iftsn,
1334         .validate_ftsn          = sctp_validate_iftsn,
1335         .report_ftsn            = sctp_report_iftsn,
1336         .handle_ftsn            = sctp_handle_iftsn,
1337 };
1338
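/* Select the DATA or I-DATA operation set for the association, depending
 * on whether interleaving support was negotiated.
 */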
1339 void sctp_stream_interleave_init(struct sctp_stream *stream)
1340 {
1341         struct sctp_association *asoc;
1342
1343         asoc = container_of(stream, struct sctp_association, stream);
1344         stream->si = asoc->intl_enable ? &sctp_stream_interleave_1
1345                                        : &sctp_stream_interleave_0;
1346 }