TOMOYO Linux Cross Reference
Linux/net/sctp/ulpqueue.c

  1 /* SCTP kernel implementation
  2  * (C) Copyright IBM Corp. 2001, 2004
  3  * Copyright (c) 1999-2000 Cisco, Inc.
  4  * Copyright (c) 1999-2001 Motorola, Inc.
  5  * Copyright (c) 2001 Intel Corp.
  6  * Copyright (c) 2001 Nokia, Inc.
  7  * Copyright (c) 2001 La Monte H.P. Yarroll
  8  *
  9  * This abstraction carries sctp events to the ULP (sockets).
 10  *
 11  * This SCTP implementation is free software;
 12  * you can redistribute it and/or modify it under the terms of
 13  * the GNU General Public License as published by
 14  * the Free Software Foundation; either version 2, or (at your option)
 15  * any later version.
 16  *
 17  * This SCTP implementation is distributed in the hope that it
 18  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 19  *                 ************************
 20  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 21  * See the GNU General Public License for more details.
 22  *
 23  * You should have received a copy of the GNU General Public License
 24  * along with GNU CC; see the file COPYING.  If not, see
 25  * <http://www.gnu.org/licenses/>.
 26  *
 27  * Please send any bug reports or fixes you make to the
 28  * email address(es):
 29  *    lksctp developers <linux-sctp@vger.kernel.org>
 30  *
 31  * Written or modified by:
 32  *    Jon Grimm             <jgrimm@us.ibm.com>
 33  *    La Monte H.P. Yarroll <piggy@acm.org>
 34  *    Sridhar Samudrala     <sri@us.ibm.com>
 35  */
 36 
 37 #include <linux/slab.h>
 38 #include <linux/types.h>
 39 #include <linux/skbuff.h>
 40 #include <net/sock.h>
 41 #include <net/busy_poll.h>
 42 #include <net/sctp/structs.h>
 43 #include <net/sctp/sctp.h>
 44 #include <net/sctp/sm.h>
 45 
 46 /* Forward declarations for internal helpers.  */
 47 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
 48                                               struct sctp_ulpevent *event);
 49 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 50                                               struct sctp_ulpevent *event);
 51 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
 52 
 53 /* 1st Level Abstractions */
 54 
 55 /* Initialize a ULP queue from a block of memory.  */
 56 struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
 57                                  struct sctp_association *asoc)
 58 {
 59         memset(ulpq, 0, sizeof(struct sctp_ulpq));
 60 
 61         ulpq->asoc = asoc;
 62         skb_queue_head_init(&ulpq->reasm);
 63         skb_queue_head_init(&ulpq->lobby);
 64         ulpq->pd_mode  = 0;
 65 
 66         return ulpq;
 67 }
 68 
 70 /* Flush the reassembly and ordering queues.  */
 71 void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
 72 {
 73         struct sk_buff *skb;
 74         struct sctp_ulpevent *event;
 75 
 76         while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
 77                 event = sctp_skb2event(skb);
 78                 sctp_ulpevent_free(event);
 79         }
 80 
 81         while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
 82                 event = sctp_skb2event(skb);
 83                 sctp_ulpevent_free(event);
 84         }
 86 }
 87 
 88 /* Dispose of a ulpqueue.  */
 89 void sctp_ulpq_free(struct sctp_ulpq *ulpq)
 90 {
 91         sctp_ulpq_flush(ulpq);
 92 }
 93 
 94 /* Process an incoming DATA chunk.  */
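     /* Returns 1 if a complete (MSG_EOR) message was passed up to the
      * ULP, 0 if the data was held back for reassembly/ordering or only
      * a partial message was delivered, and -ENOMEM if no event could
      * be allocated.
      */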
 95 int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 96                         gfp_t gfp)
 97 {
 98         struct sk_buff_head temp;
 99         struct sctp_ulpevent *event;
100         int event_eor = 0;
101 
102         /* Create an event from the incoming chunk. */
103         event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
104         if (!event)
105                 return -ENOMEM;
106 
107         /* Do reassembly if needed.  */
108         event = sctp_ulpq_reasm(ulpq, event);
109 
110         /* Do ordering if needed.  */
111         if ((event) && (event->msg_flags & MSG_EOR)) {
112                 /* Create a temporary list to collect chunks on.  */
113                 skb_queue_head_init(&temp);
114                 __skb_queue_tail(&temp, sctp_event2skb(event));
115 
116                 event = sctp_ulpq_order(ulpq, event);
117         }
118 
119         /* Send event to the ULP.  'event' is the sctp_ulpevent for
 120          * the very first SKB on the 'temp' list.
121          */
122         if (event) {
123                 event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
124                 sctp_ulpq_tail_event(ulpq, event);
125         }
126 
127         return event_eor;
128 }
129 
 131 /* Clear the partial delivery mode for this socket.  Note: This
132  * assumes that no association is currently in partial delivery mode.
133  */
134 int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
135 {
136         struct sctp_sock *sp = sctp_sk(sk);
137 
138         if (atomic_dec_and_test(&sp->pd_mode)) {
139                 /* This means there are no other associations in PD, so
140                  * we can go ahead and clear out the lobby in one shot
141                  */
142                 if (!skb_queue_empty(&sp->pd_lobby)) {
143                         struct list_head *list;
144                         sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
145                         list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
146                         INIT_LIST_HEAD(list);
147                         return 1;
148                 }
149         } else {
150                 /* There are other associations in PD, so we only need to
151                  * pull stuff out of the lobby that belongs to the
 152                  * association that is exiting PD (all of its notifications
153                  * are posted here).
154                  */
155                 if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
156                         struct sk_buff *skb, *tmp;
157                         struct sctp_ulpevent *event;
158 
159                         sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
160                                 event = sctp_skb2event(skb);
161                                 if (event->asoc == asoc) {
162                                         __skb_unlink(skb, &sp->pd_lobby);
163                                         __skb_queue_tail(&sk->sk_receive_queue,
164                                                          skb);
165                                 }
166                         }
167                 }
168         }
169 
170         return 0;
171 }
172 
173 /* Set the pd_mode on the socket and ulpq */
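     /* The socket-level pd_mode is an atomic count of how many
      * associations on the socket are in partial delivery at once;
      * ulpq->pd_mode flags this particular association.
      */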
174 static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
175 {
176         struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
177 
178         atomic_inc(&sp->pd_mode);
179         ulpq->pd_mode = 1;
180 }
181 
182 /* Clear the pd_mode and restart any pending messages waiting for delivery. */
183 static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
184 {
185         ulpq->pd_mode = 0;
186         sctp_ulpq_reasm_drain(ulpq);
187         return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
188 }
189 
190 /* If the SKB of 'event' is on a list, it is the first such member
191  * of that list.
192  */
193 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
194 {
195         struct sock *sk = ulpq->asoc->base.sk;
196         struct sk_buff_head *queue, *skb_list;
197         struct sk_buff *skb = sctp_event2skb(event);
198         int clear_pd = 0;
199 
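             /* When the skb is the first member of a sk_buff_head, its
              * ->prev pointer points back at the list head itself, so this
              * cast recovers the list 'event' was queued on.  An skb that
              * was unlinked from a queue has ->prev set to NULL, which the
              * skb_list checks below rely on.
              */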
200         skb_list = (struct sk_buff_head *) skb->prev;
201 
202         /* If the socket is just going to throw this away, do not
203          * even try to deliver it.
204          */
205         if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
206                 goto out_free;
207 
208         if (!sctp_ulpevent_is_notification(event)) {
209                 sk_mark_napi_id(sk, skb);
210                 sk_incoming_cpu_update(sk);
211         }
212         /* Check if the user wishes to receive this event.  */
213         if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
214                 goto out_free;
215 
216         /* If we are in partial delivery mode, post to the lobby until
 217          * partial delivery is cleared, unless, of course, _this_
 218          * association is the cause of the partial delivery.
219          */
220 
221         if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
222                 queue = &sk->sk_receive_queue;
223         } else {
224                 if (ulpq->pd_mode) {
225                         /* If the association is in partial delivery, we
226                          * need to finish delivering the partially processed
227                          * packet before passing any other data.  This is
228                          * because we don't truly support stream interleaving.
229                          */
230                         if ((event->msg_flags & MSG_NOTIFICATION) ||
231                             (SCTP_DATA_NOT_FRAG ==
232                                     (event->msg_flags & SCTP_DATA_FRAG_MASK)))
233                                 queue = &sctp_sk(sk)->pd_lobby;
234                         else {
235                                 clear_pd = event->msg_flags & MSG_EOR;
236                                 queue = &sk->sk_receive_queue;
237                         }
238                 } else {
239                         /*
240                          * If fragment interleave is enabled, we
241                          * can queue this to the receive queue instead
242                          * of the lobby.
243                          */
244                         if (sctp_sk(sk)->frag_interleave)
245                                 queue = &sk->sk_receive_queue;
246                         else
247                                 queue = &sctp_sk(sk)->pd_lobby;
248                 }
249         }
250 
 251         /* If we are harvesting multiple skbs, they will be
252          * collected on a list.
253          */
254         if (skb_list)
255                 sctp_skb_list_tail(skb_list, queue);
256         else
257                 __skb_queue_tail(queue, skb);
258 
259         /* Did we just complete partial delivery and need to get
260          * rolling again?  Move pending data to the receive
261          * queue.
262          */
263         if (clear_pd)
264                 sctp_ulpq_clear_pd(ulpq);
265 
266         if (queue == &sk->sk_receive_queue)
267                 sk->sk_data_ready(sk);
268         return 1;
269 
270 out_free:
271         if (skb_list)
272                 sctp_queue_purge_ulpevents(skb_list);
273         else
274                 sctp_ulpevent_free(event);
275 
276         return 0;
277 }
278 
279 /* 2nd Level Abstractions */
280 
281 /* Helper function to store chunks that need to be reassembled.  */
282 static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
283                                          struct sctp_ulpevent *event)
284 {
285         struct sk_buff *pos;
286         struct sctp_ulpevent *cevent;
287         __u32 tsn, ctsn;
288 
289         tsn = event->tsn;
290 
291         /* See if it belongs at the end. */
292         pos = skb_peek_tail(&ulpq->reasm);
293         if (!pos) {
294                 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
295                 return;
296         }
297 
 298         /* Short-circuit: just drop it at the end. */
299         cevent = sctp_skb2event(pos);
300         ctsn = cevent->tsn;
301         if (TSN_lt(ctsn, tsn)) {
302                 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
303                 return;
304         }
305 
306         /* Find the right place in this list. We store them by TSN.  */
307         skb_queue_walk(&ulpq->reasm, pos) {
308                 cevent = sctp_skb2event(pos);
309                 ctsn = cevent->tsn;
310 
311                 if (TSN_lt(tsn, ctsn))
312                         break;
313         }
314 
315         /* Insert before pos. */
316         __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
318 }
319 
320 /* Helper function to return an event corresponding to the reassembled
321  * datagram.
 322  * This routine creates a re-assembled skb given the first and last skbs
 323  * as stored in the reassembly queue. The skbs may be non-linear if the SCTP
 324  * payload was fragmented on the way and IP had to reassemble them.
 325  * We add the rest of the skbs to the first skb's frag_list.
326  */
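     /* For example, with fragments A -> B -> C held on the queue (A being
      * f_frag, C being l_frag, and A having no frag_list of its own),
      * B and C are moved onto A's frag_list, A's len and data_len grow by
      * len(B) + len(C), and A alone is returned as the event.
      */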
327 static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
328         struct sk_buff_head *queue, struct sk_buff *f_frag,
329         struct sk_buff *l_frag)
330 {
331         struct sk_buff *pos;
332         struct sk_buff *new = NULL;
333         struct sctp_ulpevent *event;
334         struct sk_buff *pnext, *last;
335         struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
336 
337         /* Store the pointer to the 2nd skb */
338         if (f_frag == l_frag)
339                 pos = NULL;
340         else
341                 pos = f_frag->next;
342 
343         /* Get the last skb in the f_frag's frag_list if present. */
344         for (last = list; list; last = list, list = list->next)
345                 ;
346 
 347         /* Add the list of remaining fragments to the first fragment's
348          * frag_list.
349          */
350         if (last)
351                 last->next = pos;
352         else {
353                 if (skb_cloned(f_frag)) {
354                         /* This is a cloned skb, we can't just modify
355                          * the frag_list.  We need a new skb to do that.
356                          * Instead of calling skb_unshare(), we'll do it
357                          * ourselves since we need to delay the free.
358                          */
359                         new = skb_copy(f_frag, GFP_ATOMIC);
360                         if (!new)
361                                 return NULL;    /* try again later */
362 
363                         sctp_skb_set_owner_r(new, f_frag->sk);
364 
365                         skb_shinfo(new)->frag_list = pos;
366                 } else
367                         skb_shinfo(f_frag)->frag_list = pos;
368         }
369 
370         /* Remove the first fragment from the reassembly queue.  */
371         __skb_unlink(f_frag, queue);
372 
373         /* if we did unshare, then free the old skb and re-assign */
374         if (new) {
375                 kfree_skb(f_frag);
376                 f_frag = new;
377         }
378 
379         while (pos) {
381                 pnext = pos->next;
382 
383                 /* Update the len and data_len fields of the first fragment. */
384                 f_frag->len += pos->len;
385                 f_frag->data_len += pos->len;
386 
387                 /* Remove the fragment from the reassembly queue.  */
388                 __skb_unlink(pos, queue);
389 
390                 /* Break if we have reached the last fragment.  */
391                 if (pos == l_frag)
392                         break;
393                 pos->next = pnext;
394                 pos = pnext;
395         }
396 
397         event = sctp_skb2event(f_frag);
398         SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);
399 
400         return event;
401 }
402 
404 /* Helper function to check if an incoming chunk has filled up the last
 405  * missing fragment in an SCTP datagram and return the corresponding event.
406  */
407 static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
408 {
409         struct sk_buff *pos;
410         struct sctp_ulpevent *cevent;
411         struct sk_buff *first_frag = NULL;
412         __u32 ctsn, next_tsn;
413         struct sctp_ulpevent *retval = NULL;
414         struct sk_buff *pd_first = NULL;
415         struct sk_buff *pd_last = NULL;
416         size_t pd_len = 0;
417         struct sctp_association *asoc;
418         u32 pd_point;
419 
 420         /* Initialized to 0 just to avoid a compiler warning.  Will
421          * never be used with this value. It is referenced only after it
422          * is set when we find the first fragment of a message.
423          */
424         next_tsn = 0;
425 
426         /* The chunks are held in the reasm queue sorted by TSN.
427          * Walk through the queue sequentially and look for a sequence of
428          * fragmented chunks that complete a datagram.
 429          * 'first_frag' and 'next_tsn' are reset when we find a chunk which
 430          * is the first fragment of a datagram. Once these two fields are
 431          * set, we expect to find the remaining middle fragments and the
 432          * last fragment in order. If not, 'first_frag' is reset to NULL and
 433          * we start the next pass when we find another first fragment.
 434          *
 435          * There is also the potential to do partial delivery if the user
 436          * sets the SCTP_PARTIAL_DELIVERY_POINT option. Let's count some
 437          * things here to see if we can do PD.
438          */
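             /* For example, if the queue holds TSNs 10 (FIRST_FRAG),
              * 11 (MIDDLE_FRAG), 12 (MIDDLE_FRAG) and 13 (LAST_FRAG), the
              * walk sets first_frag at 10, advances next_tsn through
              * 11..13 and reassembles the whole run when it reaches 13.
              * Were TSN 12 missing, the LAST_FRAG at 13 would fail the
              * ctsn == next_tsn test and first_frag would be cleared until
              * another first fragment is found.
              */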
439         skb_queue_walk(&ulpq->reasm, pos) {
440                 cevent = sctp_skb2event(pos);
441                 ctsn = cevent->tsn;
442 
443                 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
444                 case SCTP_DATA_FIRST_FRAG:
445                         /* If this "FIRST_FRAG" is the first
446                          * element in the queue, then count it towards
447                          * possible PD.
448                          */
449                         if (pos == ulpq->reasm.next) {
 450                                 pd_first = pos;
 451                                 pd_last = pos;
 452                                 pd_len = pos->len;
 453                         } else {
 454                                 pd_first = NULL;
 455                                 pd_last = NULL;
 456                                 pd_len = 0;
457                         }
458 
459                         first_frag = pos;
460                         next_tsn = ctsn + 1;
461                         break;
462 
463                 case SCTP_DATA_MIDDLE_FRAG:
464                         if ((first_frag) && (ctsn == next_tsn)) {
465                                 next_tsn++;
466                                 if (pd_first) {
 467                                         pd_last = pos;
 468                                         pd_len += pos->len;
469                                 }
470                         } else
471                                 first_frag = NULL;
472                         break;
473 
474                 case SCTP_DATA_LAST_FRAG:
475                         if (first_frag && (ctsn == next_tsn))
476                                 goto found;
477                         else
478                                 first_frag = NULL;
479                         break;
480                 }
481         }
482 
483         asoc = ulpq->asoc;
484         if (pd_first) {
 485         /* Make sure we can enter partial delivery.
 486          * We can trigger partial delivery only if fragment
 487          * interleave is set, or the socket is not already
 488          * in partial delivery.
489                  */
490                 if (!sctp_sk(asoc->base.sk)->frag_interleave &&
491                     atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
492                         goto done;
493 
494                 cevent = sctp_skb2event(pd_first);
495                 pd_point = sctp_sk(asoc->base.sk)->pd_point;
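                     /* For example, with a partial delivery point of 4096
                      * bytes and an in-order run of fragments totalling
                      * pd_len = 5000 bytes at the head of the queue, the
                      * run is delivered early and the association enters
                      * partial delivery mode.
                      */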
496                 if (pd_point && pd_point <= pd_len) {
497                         retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
498                                                              &ulpq->reasm,
499                                                              pd_first,
500                                                              pd_last);
501                         if (retval)
502                                 sctp_ulpq_set_pd(ulpq);
503                 }
504         }
505 done:
506         return retval;
507 found:
508         retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
509                                              &ulpq->reasm, first_frag, pos);
510         if (retval)
511                 retval->msg_flags |= MSG_EOR;
512         goto done;
513 }
514 
515 /* Retrieve the next set of fragments of a partial message. */
516 static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
517 {
518         struct sk_buff *pos, *last_frag, *first_frag;
519         struct sctp_ulpevent *cevent;
520         __u32 ctsn, next_tsn;
521         int is_last;
522         struct sctp_ulpevent *retval;
523 
524         /* The chunks are held in the reasm queue sorted by TSN.
525          * Walk through the queue sequentially and look for the first
526          * sequence of fragmented chunks.
527          */
528 
529         if (skb_queue_empty(&ulpq->reasm))
530                 return NULL;
531 
532         last_frag = first_frag = NULL;
533         retval = NULL;
534         next_tsn = 0;
535         is_last = 0;
536 
537         skb_queue_walk(&ulpq->reasm, pos) {
538                 cevent = sctp_skb2event(pos);
539                 ctsn = cevent->tsn;
540 
541                 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
542                 case SCTP_DATA_FIRST_FRAG:
543                         if (!first_frag)
544                                 return NULL;
545                         goto done;
546                 case SCTP_DATA_MIDDLE_FRAG:
547                         if (!first_frag) {
548                                 first_frag = pos;
549                                 next_tsn = ctsn + 1;
550                                 last_frag = pos;
551                         } else if (next_tsn == ctsn) {
552                                 next_tsn++;
553                                 last_frag = pos;
554                         } else
555                                 goto done;
556                         break;
557                 case SCTP_DATA_LAST_FRAG:
558                         if (!first_frag)
559                                 first_frag = pos;
560                         else if (ctsn != next_tsn)
561                                 goto done;
562                         last_frag = pos;
563                         is_last = 1;
564                         goto done;
565                 default:
566                         return NULL;
567                 }
568         }
569 
570         /* We have the reassembled event. There is no need to look
571          * further.
572          */
573 done:
574         retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
575                                         &ulpq->reasm, first_frag, last_frag);
576         if (retval && is_last)
577                 retval->msg_flags |= MSG_EOR;
578 
579         return retval;
580 }
581 
583 /* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
584  * need reassembling.
585  */
586 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
587                                                 struct sctp_ulpevent *event)
588 {
589         struct sctp_ulpevent *retval = NULL;
590 
591         /* Check if this is part of a fragmented message.  */
592         if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
593                 event->msg_flags |= MSG_EOR;
594                 return event;
595         }
596 
597         sctp_ulpq_store_reasm(ulpq, event);
598         if (!ulpq->pd_mode)
599                 retval = sctp_ulpq_retrieve_reassembled(ulpq);
600         else {
601                 __u32 ctsn, ctsnap;
602 
603                 /* Do not even bother unless this is the next tsn to
604                  * be delivered.
605                  */
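                     /* That is, only look for more of the message once the
                      * new fragment's TSN is covered by the cumulative TSN
                      * ACK point, so that it can be delivered without a gap.
                      */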
606                 ctsn = event->tsn;
607                 ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
608                 if (TSN_lte(ctsn, ctsnap))
609                         retval = sctp_ulpq_retrieve_partial(ulpq);
610         }
611 
612         return retval;
613 }
614 
615 /* Retrieve the first part (sequential fragments) for partial delivery.  */
616 static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
617 {
618         struct sk_buff *pos, *last_frag, *first_frag;
619         struct sctp_ulpevent *cevent;
620         __u32 ctsn, next_tsn;
621         struct sctp_ulpevent *retval;
622 
623         /* The chunks are held in the reasm queue sorted by TSN.
624          * Walk through the queue sequentially and look for a sequence of
625          * fragmented chunks that start a datagram.
626          */
627 
628         if (skb_queue_empty(&ulpq->reasm))
629                 return NULL;
630 
631         last_frag = first_frag = NULL;
632         retval = NULL;
633         next_tsn = 0;
634 
635         skb_queue_walk(&ulpq->reasm, pos) {
636                 cevent = sctp_skb2event(pos);
637                 ctsn = cevent->tsn;
638 
639                 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
640                 case SCTP_DATA_FIRST_FRAG:
641                         if (!first_frag) {
642                                 first_frag = pos;
643                                 next_tsn = ctsn + 1;
644                                 last_frag = pos;
645                         } else
646                                 goto done;
647                         break;
648 
649                 case SCTP_DATA_MIDDLE_FRAG:
650                         if (!first_frag)
651                                 return NULL;
652                         if (ctsn == next_tsn) {
653                                 next_tsn++;
654                                 last_frag = pos;
655                         } else
656                                 goto done;
657                         break;
658 
659                 case SCTP_DATA_LAST_FRAG:
660                         if (!first_frag)
661                                 return NULL;
662                         else
663                                 goto done;
664                         break;
665 
666                 default:
667                         return NULL;
668                 }
669         }
670 
671         /* We have the reassembled event. There is no need to look
672          * further.
673          */
674 done:
675         retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
676                                         &ulpq->reasm, first_frag, last_frag);
677         return retval;
678 }
679 
680 /*
681  * Flush out stale fragments from the reassembly queue when processing
682  * a Forward TSN.
683  *
684  * RFC 3758, Section 3.6
685  *
686  * After receiving and processing a FORWARD TSN, the data receiver MUST
687  * take cautions in updating its re-assembly queue.  The receiver MUST
688  * remove any partially reassembled message, which is still missing one
689  * or more TSNs earlier than or equal to the new cumulative TSN point.
690  * In the event that the receiver has invoked the partial delivery API,
691  * a notification SHOULD also be generated to inform the upper layer API
692  * that the message being partially delivered will NOT be completed.
693  */
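     /* For example, a FORWARD TSN that advances the cumulative point to
      * 105 frees every queued fragment with a TSN at or below 105; since
      * the queue is sorted by TSN, the walk below stops at the first
      * fragment beyond that point.
      */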
694 void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
695 {
696         struct sk_buff *pos, *tmp;
697         struct sctp_ulpevent *event;
698         __u32 tsn;
699 
700         if (skb_queue_empty(&ulpq->reasm))
701                 return;
702 
703         skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
704                 event = sctp_skb2event(pos);
705                 tsn = event->tsn;
706 
707                 /* Since the entire message must be abandoned by the
708                  * sender (item A3 in Section 3.5, RFC 3758), we can
 709                  * free all fragments on the list that are less than
 710                  * or equal to ctsn_point.
711                  */
712                 if (TSN_lte(tsn, fwd_tsn)) {
713                         __skb_unlink(pos, &ulpq->reasm);
714                         sctp_ulpevent_free(event);
715                 } else
716                         break;
717         }
718 }
719 
720 /*
 721  * Drain the reassembly queue.  If we just cleared partial delivery, it
722  * is possible that the reassembly queue will contain already reassembled
723  * messages.  Retrieve any such messages and give them to the user.
724  */
725 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
726 {
727         struct sctp_ulpevent *event = NULL;
728         struct sk_buff_head temp;
729 
730         if (skb_queue_empty(&ulpq->reasm))
731                 return;
732 
733         while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
734                 /* Do ordering if needed.  */
735                 if ((event) && (event->msg_flags & MSG_EOR)) {
736                         skb_queue_head_init(&temp);
737                         __skb_queue_tail(&temp, sctp_event2skb(event));
738 
739                         event = sctp_ulpq_order(ulpq, event);
740                 }
741 
742                 /* Send event to the ULP.  'event' is the
 743          * sctp_ulpevent for the very first SKB on the 'temp' list.
744                  */
745                 if (event)
746                         sctp_ulpq_tail_event(ulpq, event);
747         }
748 }
749 
751 /* Helper function to gather skbs that have possibly become
 752  * ordered by an incoming chunk.
753  */
754 static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
755                                               struct sctp_ulpevent *event)
756 {
757         struct sk_buff_head *event_list;
758         struct sk_buff *pos, *tmp;
759         struct sctp_ulpevent *cevent;
760         struct sctp_stream *in;
761         __u16 sid, csid, cssn;
762 
763         sid = event->stream;
764         in  = &ulpq->asoc->ssnmap->in;
765 
766         event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
767 
768         /* We are holding the chunks by stream, by SSN.  */
769         sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
770                 cevent = (struct sctp_ulpevent *) pos->cb;
771                 csid = cevent->stream;
772                 cssn = cevent->ssn;
773 
774                 /* Have we gone too far?  */
775                 if (csid > sid)
776                         break;
777 
778                 /* Have we not gone far enough?  */
779                 if (csid < sid)
780                         continue;
781 
782                 if (cssn != sctp_ssn_peek(in, sid))
783                         break;
784 
785                 /* Found it, so mark in the ssnmap. */
786                 sctp_ssn_next(in, sid);
787 
788                 __skb_unlink(pos, &ulpq->lobby);
789 
790                 /* Attach all gathered skbs to the event.  */
791                 __skb_queue_tail(event_list, pos);
792         }
793 }
794 
795 /* Helper function to store chunks needing ordering.  */
796 static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
797                                            struct sctp_ulpevent *event)
798 {
799         struct sk_buff *pos;
800         struct sctp_ulpevent *cevent;
801         __u16 sid, csid;
802         __u16 ssn, cssn;
803 
804         pos = skb_peek_tail(&ulpq->lobby);
805         if (!pos) {
806                 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
807                 return;
808         }
809 
810         sid = event->stream;
811         ssn = event->ssn;
812 
813         cevent = (struct sctp_ulpevent *) pos->cb;
814         csid = cevent->stream;
815         cssn = cevent->ssn;
816         if (sid > csid) {
817                 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
818                 return;
819         }
820 
821         if ((sid == csid) && SSN_lt(cssn, ssn)) {
822                 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
823                 return;
824         }
825 
826         /* Find the right place in this list.  We store them by
827          * stream ID and then by SSN.
828          */
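             /* For example, inserting (sid 1, ssn 4) into a lobby holding
              * (1,2), (1,3), (2,1) breaks at (2,1) and queues the new event
              * just before it, keeping the lobby sorted by stream and then
              * by SSN.
              */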
829         skb_queue_walk(&ulpq->lobby, pos) {
830                 cevent = (struct sctp_ulpevent *) pos->cb;
831                 csid = cevent->stream;
832                 cssn = cevent->ssn;
833 
834                 if (csid > sid)
835                         break;
836                 if (csid == sid && SSN_lt(ssn, cssn))
837                         break;
838         }
839 
841         /* Insert before pos. */
842         __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
843 }
844 
845 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
846                                              struct sctp_ulpevent *event)
847 {
848         __u16 sid, ssn;
849         struct sctp_stream *in;
850 
851         /* Check if this message needs ordering.  */
852         if (SCTP_DATA_UNORDERED & event->msg_flags)
853                 return event;
854 
855         /* Note: The stream ID must be verified before this routine.  */
856         sid = event->stream;
857         ssn = event->ssn;
858         in  = &ulpq->asoc->ssnmap->in;
859 
860         /* Is this the expected SSN for this stream ID?  */
861         if (ssn != sctp_ssn_peek(in, sid)) {
862                 /* We've received something out of order, so find where it
863                  * needs to be placed.  We order by stream and then by SSN.
864                  */
865                 sctp_ulpq_store_ordered(ulpq, event);
866                 return NULL;
867         }
868 
869         /* Mark that the next chunk has been found.  */
870         sctp_ssn_next(in, sid);
871 
872         /* Go find any other chunks that were waiting for
873          * ordering.
874          */
875         sctp_ulpq_retrieve_ordered(ulpq, event);
876 
877         return event;
878 }
879 
880 /* Helper function to gather skbs that have possibly become
 881  * ordered by a Forward TSN skipping their dependencies.
882  */
883 static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
884 {
885         struct sk_buff *pos, *tmp;
886         struct sctp_ulpevent *cevent;
887         struct sctp_ulpevent *event;
888         struct sctp_stream *in;
889         struct sk_buff_head temp;
890         struct sk_buff_head *lobby = &ulpq->lobby;
891         __u16 csid, cssn;
892 
893         in  = &ulpq->asoc->ssnmap->in;
894 
895         /* We are holding the chunks by stream, by SSN.  */
896         skb_queue_head_init(&temp);
897         event = NULL;
898         sctp_skb_for_each(pos, lobby, tmp) {
899                 cevent = (struct sctp_ulpevent *) pos->cb;
900                 csid = cevent->stream;
901                 cssn = cevent->ssn;
902 
903                 /* Have we gone too far?  */
904                 if (csid > sid)
905                         break;
906 
907                 /* Have we not gone far enough?  */
908                 if (csid < sid)
909                         continue;
910 
 911                 /* See if this SSN has been marked by skipping. */
912                 if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
913                         break;
914 
915                 __skb_unlink(pos, lobby);
916                 if (!event)
917                         /* Create a temporary list to collect chunks on.  */
918                         event = sctp_skb2event(pos);
919 
920                 /* Attach all gathered skbs to the event.  */
921                 __skb_queue_tail(&temp, pos);
922         }
923 
924         /* If we didn't reap any data, see if the next expected SSN
925          * is next on the queue and if so, use that.
926          */
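             /* After the walk above, 'pos' points either at the chunk where
              * the loop stopped or, if the loop ran off the end, at the
              * list head itself; the cast comparison below filters out the
              * latter case.
              */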
927         if (event == NULL && pos != (struct sk_buff *)lobby) {
928                 cevent = (struct sctp_ulpevent *) pos->cb;
929                 csid = cevent->stream;
930                 cssn = cevent->ssn;
931 
932                 if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
933                         sctp_ssn_next(in, csid);
934                         __skb_unlink(pos, lobby);
935                         __skb_queue_tail(&temp, pos);
936                         event = sctp_skb2event(pos);
937                 }
938         }
939 
940         /* Send event to the ULP.  'event' is the sctp_ulpevent for
941          * very first SKB on the 'temp' list.
942          */
943         if (event) {
 944                 /* See if we have more ordered data that we can deliver. */
945                 sctp_ulpq_retrieve_ordered(ulpq, event);
946                 sctp_ulpq_tail_event(ulpq, event);
947         }
948 }
949 
950 /* Skip over an SSN. This is used during the processing of
 951  * a Forward TSN chunk to skip over the abandoned ordered data.
952  */
953 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
954 {
955         struct sctp_stream *in;
956 
957         /* Note: The stream ID must be verified before this routine.  */
958         in  = &ulpq->asoc->ssnmap->in;
959 
 960         /* Is this an old SSN?  If so, ignore. */
961         if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
962                 return;
963 
964         /* Mark that we are no longer expecting this SSN or lower. */
965         sctp_ssn_skip(in, sid, ssn);
966 
967         /* Go find any other chunks that were waiting for
968          * ordering and deliver them if needed.
969          */
970         sctp_ulpq_reap_ordered(ulpq, sid);
971 }
972 
973 static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
974                 struct sk_buff_head *list, __u16 needed)
975 {
976         __u16 freed = 0;
977         __u32 tsn, last_tsn;
978         struct sk_buff *skb, *flist, *last;
979         struct sctp_ulpevent *event;
980         struct sctp_tsnmap *tsnmap;
981 
982         tsnmap = &ulpq->asoc->peer.tsn_map;
983 
984         while ((skb = skb_peek_tail(list)) != NULL) {
985                 event = sctp_skb2event(skb);
986                 tsn = event->tsn;
987 
988                 /* Don't renege below the Cumulative TSN ACK Point. */
989                 if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
990                         break;
991 
992                 /* Events in ordering queue may have multiple fragments
993                  * corresponding to additional TSNs.  Sum the total
994                  * freed space; find the last TSN.
995                  */
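                     /* For example, an event reassembled from TSNs 7..9
                      * adds the head length of all three skbs to 'freed'
                      * and causes TSNs 7, 8 and 9 to be reneged below.
                      */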
996                 freed += skb_headlen(skb);
997                 flist = skb_shinfo(skb)->frag_list;
998                 for (last = flist; flist; flist = flist->next) {
999                         last = flist;
1000                         freed += skb_headlen(last);
1001                 }
1002                 if (last)
1003                         last_tsn = sctp_skb2event(last)->tsn;
1004                 else
1005                         last_tsn = tsn;
1006 
1007                 /* Unlink the event, then renege all applicable TSNs. */
1008                 __skb_unlink(skb, list);
1009                 sctp_ulpevent_free(event);
1010                 while (TSN_lte(tsn, last_tsn)) {
1011                         sctp_tsnmap_renege(tsnmap, tsn);
1012                         tsn++;
1013                 }
1014                 if (freed >= needed)
1015                         return freed;
1016         }
1017 
1018         return freed;
1019 }
1020 
1021 /* Renege 'needed' bytes from the ordering queue. */
1022 static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
1023 {
1024         return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
1025 }
1026 
1027 /* Renege 'needed' bytes from the reassembly queue. */
1028 static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
1029 {
1030         return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
1031 }
1032 
1033 /* Partially deliver the first message as there is pressure on rwnd. */
1034 void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
1035                                 gfp_t gfp)
1036 {
1037         struct sctp_ulpevent *event;
1038         struct sctp_association *asoc;
1039         struct sctp_sock *sp;
1040         __u32 ctsn;
1041         struct sk_buff *skb;
1042 
1043         asoc = ulpq->asoc;
1044         sp = sctp_sk(asoc->base.sk);
1045 
1046         /* If the association is already in Partial Delivery mode
1047          * we have nothing to do.
1048          */
1049         if (ulpq->pd_mode)
1050                 return;
1051 
1052         /* Data must be at or below the Cumulative TSN ACK Point to
1053          * start partial delivery.
1054          */
1055         skb = skb_peek(&asoc->ulpq.reasm);
1056         if (skb != NULL) {
1057                 ctsn = sctp_skb2event(skb)->tsn;
1058                 if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
1059                         return;
1060         }
1061 
1062         /* If the user enabled fragment interleave socket option,
1063          * multiple associations can enter partial delivery.
1064          * Otherwise, we can only enter partial delivery if the
1065  * socket is not in partial delivery mode.
1066          */
1067         if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
1068                 /* Is partial delivery possible?  */
1069                 event = sctp_ulpq_retrieve_first(ulpq);
1070                 /* Send event to the ULP.   */
1071                 if (event) {
1072                         sctp_ulpq_tail_event(ulpq, event);
1073                         sctp_ulpq_set_pd(ulpq);
1074                         return;
1075                 }
1076         }
1077 }
1078 
1079 /* Renege some packets to make room for an incoming chunk.  */
1080 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
1081                       gfp_t gfp)
1082 {
1083         struct sctp_association *asoc;
1084         __u16 needed, freed;
1085 
1086         asoc = ulpq->asoc;
1087 
1088         if (chunk) {
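                     /* The room needed is the incoming chunk's payload
                      * size: its total length minus the DATA chunk header.
                      */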
1089                 needed = ntohs(chunk->chunk_hdr->length);
1090                 needed -= sizeof(sctp_data_chunk_t);
1091         } else
1092                 needed = SCTP_DEFAULT_MAXWINDOW;
1093 
1094         freed = 0;
1095 
1096         if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
1097                 freed = sctp_ulpq_renege_order(ulpq, needed);
1098                 if (freed < needed) {
1099                         freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
1100                 }
1101         }
1102         /* If able to free enough room, accept this chunk. */
1103         if (chunk && (freed >= needed)) {
1104                 int retval;
1105                 retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
1106                 /*
1107                  * Enter partial delivery if chunk has not been
1108                  * delivered; otherwise, drain the reassembly queue.
1109                  */
1110                 if (retval <= 0)
1111                         sctp_ulpq_partial_delivery(ulpq, gfp);
1112                 else if (retval == 1)
1113                         sctp_ulpq_reasm_drain(ulpq);
1114         }
1115 
1116         sk_mem_reclaim(asoc->base.sk);
1117 }
1118 
1121 /* Notify the application if an association is aborted and in
1122  * partial delivery mode.  Send up any pending received messages.
1123  */
1124 void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1125 {
1126         struct sctp_ulpevent *ev = NULL;
1127         struct sock *sk;
1128 
1129         if (!ulpq->pd_mode)
1130                 return;
1131 
1132         sk = ulpq->asoc->base.sk;
1133         if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
1134                                        &sctp_sk(sk)->subscribe))
1135                 ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
1136                                               SCTP_PARTIAL_DELIVERY_ABORTED,
1137                                               gfp);
1138         if (ev)
1139                 __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
1140 
1141         /* If there is data waiting, send it up the socket now. */
1142         if (sctp_ulpq_clear_pd(ulpq) || ev)
1143                 sk->sk_data_ready(sk);
1144 }
1145 
