TOMOYO Linux Cross Reference
Linux/net/sctp/sm_sideeffect.c

  1 /* SCTP kernel implementation
  2  * (C) Copyright IBM Corp. 2001, 2004
  3  * Copyright (c) 1999 Cisco, Inc.
  4  * Copyright (c) 1999-2001 Motorola, Inc.
  5  *
  6  * This file is part of the SCTP kernel implementation
  7  *
  8  * These functions work with the state functions in sctp_sm_statefuns.c
  9  * to implement the state operations.  These functions implement the
 10  * steps which require modifying existing data structures.
 11  *
 12  * This SCTP implementation is free software;
 13  * you can redistribute it and/or modify it under the terms of
 14  * the GNU General Public License as published by
 15  * the Free Software Foundation; either version 2, or (at your option)
 16  * any later version.
 17  *
 18  * This SCTP implementation is distributed in the hope that it
 19  * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 20  *                 ************************
 21  * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 22  * See the GNU General Public License for more details.
 23  *
 24  * You should have received a copy of the GNU General Public License
 25  * along with GNU CC; see the file COPYING.  If not, write to
 26  * the Free Software Foundation, 59 Temple Place - Suite 330,
 27  * Boston, MA 02111-1307, USA.
 28  *
 29  * Please send any bug reports or fixes you make to the
 30  * email address(es):
 31  *    lksctp developers <linux-sctp@vger.kernel.org>
 32  *
 33  * Written or modified by:
 34  *    La Monte H.P. Yarroll <piggy@acm.org>
 35  *    Karl Knutson          <karl@athena.chicago.il.us>
 36  *    Jon Grimm             <jgrimm@austin.ibm.com>
 37  *    Hui Huang             <hui.huang@nokia.com>
 38  *    Dajiang Zhang         <dajiang.zhang@nokia.com>
 39  *    Daisy Chang           <daisyc@us.ibm.com>
 40  *    Sridhar Samudrala     <sri@us.ibm.com>
 41  *    Ardelle Fan           <ardelle.fan@intel.com>
 42  */
 43 
 44 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 45 
 46 #include <linux/skbuff.h>
 47 #include <linux/types.h>
 48 #include <linux/socket.h>
 49 #include <linux/ip.h>
 50 #include <linux/gfp.h>
 51 #include <net/sock.h>
 52 #include <net/sctp/sctp.h>
 53 #include <net/sctp/sm.h>
 54 
 55 static int sctp_cmd_interpreter(sctp_event_t event_type,
 56                                 sctp_subtype_t subtype,
 57                                 sctp_state_t state,
 58                                 struct sctp_endpoint *ep,
 59                                 struct sctp_association *asoc,
 60                                 void *event_arg,
 61                                 sctp_disposition_t status,
 62                                 sctp_cmd_seq_t *commands,
 63                                 gfp_t gfp);
 64 static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
 65                              sctp_state_t state,
 66                              struct sctp_endpoint *ep,
 67                              struct sctp_association *asoc,
 68                              void *event_arg,
 69                              sctp_disposition_t status,
 70                              sctp_cmd_seq_t *commands,
 71                              gfp_t gfp);
 72 
 73 static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
 74                                      struct sctp_transport *t);
 75 /********************************************************************
 76  * Helper functions
 77  ********************************************************************/
 78 
 79 /* A helper function for delayed processing of INET ECN CE bit. */
 80 static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
 81                                 __u32 lowest_tsn)
 82 {
 83         /* Save the TSN away for comparison when we receive CWR */
 84 
 85         asoc->last_ecne_tsn = lowest_tsn;
 86         asoc->need_ecne = 1;
 87 }
 88 
 89 /* Helper function for delayed processing of SCTP ECNE chunk.  */
 90 /* RFC 2960 Appendix A
 91  *
 92  * RFC 2481 details a specific bit for a sender to send in
 93  * the header of its next outbound TCP segment to indicate to
 94  * its peer that it has reduced its congestion window.  This
 95  * is termed the CWR bit.  For SCTP the same indication is made
 96  * by including the CWR chunk.  This chunk contains one data
 97  * element, i.e. the TSN number that was sent in the ECNE chunk.
 98  * This element represents the lowest TSN number in the datagram
 99  * that was originally marked with the CE bit.
100  */
101 static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
102                                            __u32 lowest_tsn,
103                                            struct sctp_chunk *chunk)
104 {
105         struct sctp_chunk *repl;
106 
107         /* Our previously transmitted packet ran into some congestion
108          * so we should take action by reducing cwnd and ssthresh
109          * and then ACK our peer that we've done so by
110          * sending a CWR.
111          */
112 
113         /* First, try to determine if we want to actually lower
114          * our cwnd variables.  Only lower them if the ECNE looks more
115          * recent than the last response.
116          */
117         if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
118                 struct sctp_transport *transport;
119 
120                 /* Find which transport's congestion variables
121                  * need to be adjusted.
122                  */
123                 transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);
124 
125                 /* Update the congestion variables. */
126                 if (transport)
127                         sctp_transport_lower_cwnd(transport,
128                                                   SCTP_LOWER_CWND_ECNE);
129                 asoc->last_cwr_tsn = lowest_tsn;
130         }
131 
132         /* Always try to quiet the other end.  In case of lost CWR,
133          * resend last_cwr_tsn.
134          */
135         repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);
136 
137         /* If we run out of memory, it will look like a lost CWR.  We'll
138          * get back in sync eventually.
139          */
140         return repl;
141 }
142 
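/* Illustrative sketch, not part of sm_sideeffect.c: TSN_lt() above compares
 * TSNs with serial-number (wrap-around) arithmetic.  A minimal, self-contained
 * userspace equivalent for 32-bit TSNs (hypothetical name) would be:
 */
#include <stdint.h>

static inline int example_tsn_lt(uint32_t a, uint32_t b)
{
        /* "a is older than b", correct even across a 2^32 wrap. */
        return (int32_t)(a - b) < 0;
}
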
143 /* Helper function to do delayed processing of ECN CWR chunk.  */
144 static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
145                                  __u32 lowest_tsn)
146 {
147         /* Turn off ECNE getting auto-prepended to every outgoing
148          * packet
149          */
150         asoc->need_ecne = 0;
151 }
152 
153 /* Generate SACK if necessary.  We call this at the end of a packet.  */
154 static int sctp_gen_sack(struct sctp_association *asoc, int force,
155                          sctp_cmd_seq_t *commands)
156 {
157         __u32 ctsn, max_tsn_seen;
158         struct sctp_chunk *sack;
159         struct sctp_transport *trans = asoc->peer.last_data_from;
160         int error = 0;
161 
162         if (force ||
163             (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
164             (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE)))
165                 asoc->peer.sack_needed = 1;
166 
167         ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
168         max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
169 
170         /* From 12.2 Parameters necessary per association (i.e. the TCB):
171          *
172          * Ack State : This flag indicates if the next received packet
173          *           : is to be responded to with a SACK. ...
174          *           : When DATA chunks are out of order, SACKs
175          *           : are not delayed (see Section 6).
176          *
177          * [This is actually not mentioned in Section 6, but we
178          * implement it here anyway. --piggy]
179          */
180         if (max_tsn_seen != ctsn)
181                 asoc->peer.sack_needed = 1;
182 
183         /* From 6.2  Acknowledgement on Reception of DATA Chunks:
184          *
185          * Section 4.2 of [RFC2581] SHOULD be followed. Specifically,
186          * an acknowledgement SHOULD be generated for at least every
187          * second packet (not every second DATA chunk) received, and
188          * SHOULD be generated within 200 ms of the arrival of any
189          * unacknowledged DATA chunk. ...
190          */
191         if (!asoc->peer.sack_needed) {
192                 asoc->peer.sack_cnt++;
193 
194                 /* Set the SACK delay timeout based on the
195                  * SACK delay for the last transport
196                  * data was received from, or the default
197                  * for the association.
198                  */
199                 if (trans) {
200                         /* We will need a SACK for the next packet.  */
201                         if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
202                                 asoc->peer.sack_needed = 1;
203 
204                         asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
205                                 trans->sackdelay;
206                 } else {
207                         /* We will need a SACK for the next packet.  */
208                         if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
209                                 asoc->peer.sack_needed = 1;
210 
211                         asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
212                                 asoc->sackdelay;
213                 }
214 
215                 /* Restart the SACK timer. */
216                 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
217                                 SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
218         } else {
219                 asoc->a_rwnd = asoc->rwnd;
220                 sack = sctp_make_sack(asoc);
221                 if (!sack)
222                         goto nomem;
223 
224                 asoc->peer.sack_needed = 0;
225                 asoc->peer.sack_cnt = 0;
226 
227                 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));
228 
229                 /* Stop the SACK timer.  */
230                 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
231                                 SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
232         }
233 
234         return error;
235 nomem:
236         error = -ENOMEM;
237         return error;
238 }
239 
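/* Illustrative sketch, not part of sm_sideeffect.c: the delayed-SACK logic in
 * sctp_gen_sack() amounts to "acknowledge at least every sackfreq-th packet,
 * otherwise (re)arm the delay timer".  The kernel defers the decision to the
 * next packet via sack_needed; this self-contained model (hypothetical names)
 * collapses that into a single check:
 */
#include <stdbool.h>

struct example_sack_state {
        unsigned int sack_cnt;          /* packets seen since the last SACK */
        unsigned int sackfreq;          /* e.g. 2: SACK every second packet */
};

static bool example_sack_due(struct example_sack_state *s, bool force)
{
        if (force || ++s->sack_cnt >= s->sackfreq) {
                s->sack_cnt = 0;
                return true;            /* emit a SACK now */
        }
        return false;                   /* start/restart the SACK delay timer */
}
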
240 /* When the T3-RTX timer expires, it calls this function to create the
241  * relevant state machine event.
242  */
243 void sctp_generate_t3_rtx_event(unsigned long peer)
244 {
245         int error;
246         struct sctp_transport *transport = (struct sctp_transport *) peer;
247         struct sctp_association *asoc = transport->asoc;
248         struct sock *sk = asoc->base.sk;
249         struct net *net = sock_net(sk);
250 
251         /* Check whether a task is in the sock.  */
252 
253         sctp_bh_lock_sock(sk);
254         if (sock_owned_by_user(sk)) {
255                 pr_debug("%s: sock is busy\n", __func__);
256 
257                 /* Try again later.  */
258                 if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
259                         sctp_transport_hold(transport);
260                 goto out_unlock;
261         }
262 
263         /* Is this transport really dead and just waiting around for
264          * the timer to let go of the reference?
265          */
266         if (transport->dead)
267                 goto out_unlock;
268 
269         /* Run through the state machine.  */
270         error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
271                            SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
272                            asoc->state,
273                            asoc->ep, asoc,
274                            transport, GFP_ATOMIC);
275 
276         if (error)
277                 sk->sk_err = -error;
278 
279 out_unlock:
280         sctp_bh_unlock_sock(sk);
281         sctp_transport_put(transport);
282 }
283 
284 /* This is an interface for producing timeout events.  It works
285  * for timeouts which use the association as their parameter.
286  */
287 static void sctp_generate_timeout_event(struct sctp_association *asoc,
288                                         sctp_event_timeout_t timeout_type)
289 {
290         struct sock *sk = asoc->base.sk;
291         struct net *net = sock_net(sk);
292         int error = 0;
293 
294         sctp_bh_lock_sock(sk);
295         if (sock_owned_by_user(sk)) {
296                 pr_debug("%s: sock is busy: timer %d\n", __func__,
297                          timeout_type);
298 
299                 /* Try again later.  */
300                 if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
301                         sctp_association_hold(asoc);
302                 goto out_unlock;
303         }
304 
305         /* Is this association really dead and just waiting around for
306          * the timer to let go of the reference?
307          */
308         if (asoc->base.dead)
309                 goto out_unlock;
310 
311         /* Run through the state machine.  */
312         error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
313                            SCTP_ST_TIMEOUT(timeout_type),
314                            asoc->state, asoc->ep, asoc,
315                            (void *)timeout_type, GFP_ATOMIC);
316 
317         if (error)
318                 sk->sk_err = -error;
319 
320 out_unlock:
321         sctp_bh_unlock_sock(sk);
322         sctp_association_put(asoc);
323 }
324 
325 static void sctp_generate_t1_cookie_event(unsigned long data)
326 {
327         struct sctp_association *asoc = (struct sctp_association *) data;
328         sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
329 }
330 
331 static void sctp_generate_t1_init_event(unsigned long data)
332 {
333         struct sctp_association *asoc = (struct sctp_association *) data;
334         sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
335 }
336 
337 static void sctp_generate_t2_shutdown_event(unsigned long data)
338 {
339         struct sctp_association *asoc = (struct sctp_association *) data;
340         sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
341 }
342 
343 static void sctp_generate_t4_rto_event(unsigned long data)
344 {
345         struct sctp_association *asoc = (struct sctp_association *) data;
346         sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
347 }
348 
349 static void sctp_generate_t5_shutdown_guard_event(unsigned long data)
350 {
351         struct sctp_association *asoc = (struct sctp_association *)data;
352         sctp_generate_timeout_event(asoc,
353                                     SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);
354 
355 } /* sctp_generate_t5_shutdown_guard_event() */
356 
357 static void sctp_generate_autoclose_event(unsigned long data)
358 {
359         struct sctp_association *asoc = (struct sctp_association *) data;
360         sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
361 }
362 
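/* Illustrative sketch, not part of sm_sideeffect.c: each thin wrapper above is
 * the classic pre-timer_setup() timer-callback idiom -- the timer core hands
 * back the opaque "unsigned long data" the timer was armed with, and the
 * callback casts it back to the object before injecting the timeout event.
 * A generic, self-contained form of the pattern (hypothetical names):
 */
struct example_object {
        int pending_events;
};

static void example_timer_callback(unsigned long data)
{
        struct example_object *obj = (struct example_object *)data;

        /* ...inject the corresponding timeout event for obj here... */
        obj->pending_events++;
}
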
363 /* Generate a heartbeat event.  If the sock is busy, reschedule.  Make
364  * sure that the transport is still valid.
365  */
366 void sctp_generate_heartbeat_event(unsigned long data)
367 {
368         int error = 0;
369         struct sctp_transport *transport = (struct sctp_transport *) data;
370         struct sctp_association *asoc = transport->asoc;
371         struct sock *sk = asoc->base.sk;
372         struct net *net = sock_net(sk);
373 
374         sctp_bh_lock_sock(sk);
375         if (sock_owned_by_user(sk)) {
376                 pr_debug("%s: sock is busy\n", __func__);
377 
378                 /* Try again later.  */
379                 if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
380                         sctp_transport_hold(transport);
381                 goto out_unlock;
382         }
383 
384         /* Is this structure just waiting around for us to actually
385          * get destroyed?
386          */
387         if (transport->dead)
388                 goto out_unlock;
389 
390         error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
391                            SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
392                            asoc->state, asoc->ep, asoc,
393                            transport, GFP_ATOMIC);
394 
395         if (error)
396                 sk->sk_err = -error;
397 
398 out_unlock:
399         sctp_bh_unlock_sock(sk);
400         sctp_transport_put(transport);
401 }
402 
403 /* Handle the timeout of the ICMP protocol unreachable timer.  Trigger
404  * the correct state machine transition that will close the association.
405  */
406 void sctp_generate_proto_unreach_event(unsigned long data)
407 {
408         struct sctp_transport *transport = (struct sctp_transport *) data;
409         struct sctp_association *asoc = transport->asoc;
410         struct sock *sk = asoc->base.sk;
411         struct net *net = sock_net(sk);
412
413         sctp_bh_lock_sock(sk);
414         if (sock_owned_by_user(sk)) {
415                 pr_debug("%s: sock is busy\n", __func__);
416 
417                 /* Try again later.  */
418                 if (!mod_timer(&transport->proto_unreach_timer,
419                                 jiffies + (HZ/20)))
420                         sctp_association_hold(asoc);
421                 goto out_unlock;
422         }
423 
424         /* Is this structure just waiting around for us to actually
425          * get destroyed?
426          */
427         if (asoc->base.dead)
428                 goto out_unlock;
429 
430         sctp_do_sm(net, SCTP_EVENT_T_OTHER,
431                    SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
432                    asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
433 
434 out_unlock:
435         sctp_bh_unlock_sock(sk);
436         sctp_association_put(asoc);
437 }
438 
439 
440 /* Inject a SACK Timeout event into the state machine.  */
441 static void sctp_generate_sack_event(unsigned long data)
442 {
443         struct sctp_association *asoc = (struct sctp_association *) data;
444         sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
445 }
446 
447 sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
448         NULL,
449         sctp_generate_t1_cookie_event,
450         sctp_generate_t1_init_event,
451         sctp_generate_t2_shutdown_event,
452         NULL,
453         sctp_generate_t4_rto_event,
454         sctp_generate_t5_shutdown_guard_event,
455         NULL,
456         sctp_generate_sack_event,
457         sctp_generate_autoclose_event,
458 };
459 
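/* Illustrative sketch, not part of sm_sideeffect.c: sctp_timer_events[] above
 * is indexed by the SCTP_EVENT_TIMEOUT_* value; the NULL slots correspond to
 * timeouts that are not driven per-association (e.g. the per-transport T3-RTX
 * and HEARTBEAT timers).  Dispatching through such a table, in a self-contained
 * form with hypothetical names, looks like:
 */
#include <stddef.h>

typedef void (example_timeout_fn)(unsigned long data);

static void example_dispatch_timeout(example_timeout_fn *table[], size_t ntypes,
                                     size_t type, unsigned long cookie)
{
        if (type < ntypes && table[type])
                table[type](cookie);    /* NULL slots are armed elsewhere */
}
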
460 
461 /* RFC 2960 8.2 Path Failure Detection
462  *
463  * When its peer endpoint is multi-homed, an endpoint should keep a
464  * error counter for each of the destination transport addresses of the
465  * peer endpoint.
466  *
467  * Each time the T3-rtx timer expires on any address, or when a
468  * HEARTBEAT sent to an idle address is not acknowledged within a RTO,
469  * the error counter of that destination address will be incremented.
470  * When the value in the error counter exceeds the protocol parameter
471  * 'Path.Max.Retrans' of that destination address, the endpoint should
472  * mark the destination transport address as inactive, and a
473  * notification SHOULD be sent to the upper layer.
474  *
475  */
476 static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
477                                          struct sctp_association *asoc,
478                                          struct sctp_transport *transport,
479                                          int is_hb)
480 {
481         /* The check for association's overall error counter exceeding the
482          * threshold is done in the state function.
483          */
484         /* We are here due to a timer expiration.  If the timer was
485          * not a HEARTBEAT, then normal error tracking is done.
486          * If the timer was a heartbeat, we only increment error counts
487          * when we already have an outstanding HEARTBEAT that has not
488          * been acknowledged.
489          * Additionally, some transport states inhibit error increments.
490          */
491         if (!is_hb) {
492                 asoc->overall_error_count++;
493                 if (transport->state != SCTP_INACTIVE)
494                         transport->error_count++;
495         } else if (transport->hb_sent) {
496                 if (transport->state != SCTP_UNCONFIRMED)
497                         asoc->overall_error_count++;
498                 if (transport->state != SCTP_INACTIVE)
499                         transport->error_count++;
500         }
501 
502         /* If the transport error count is greater than the pf_retrans
503          * threshold, and less than pathmaxrxt, then mark this transport
504          * as Partially Failed, see SCTP Quick Failover Draft, section 5.1,
505          * point 1.
506          */
507         if ((transport->state != SCTP_PF) &&
508            (asoc->pf_retrans < transport->pathmaxrxt) &&
509            (transport->error_count > asoc->pf_retrans)) {
510 
511                 sctp_assoc_control_transport(asoc, transport,
512                                              SCTP_TRANSPORT_PF,
513                                              0);
514 
515                 /* Update the hb timer to resend a heartbeat every rto */
516                 sctp_cmd_hb_timer_update(commands, transport);
517         }
518 
519         if (transport->state != SCTP_INACTIVE &&
520             (transport->error_count > transport->pathmaxrxt)) {
521                 pr_debug("%s: association:%p transport addr:%pISpc failed\n",
522                          __func__, asoc, &transport->ipaddr.sa);
523 
524                 sctp_assoc_control_transport(asoc, transport,
525                                              SCTP_TRANSPORT_DOWN,
526                                              SCTP_FAILED_THRESHOLD);
527         }
528 
529         /* E2) For the destination address for which the timer
530          * expires, set RTO <- RTO * 2 ("back off the timer").  The
531          * maximum value discussed in rule C7 above (RTO.max) may be
532          * used to provide an upper bound to this doubling operation.
533          *
534          * Special Case:  the first HB doesn't trigger exponential backoff.
535          * The first unacknowledged HB triggers it.  We do this with a flag
536          * that indicates that we have an outstanding HB.
537          */
538         if (!is_hb || transport->hb_sent) {
539                 transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
540                 sctp_max_rto(asoc, transport);
541         }
542 }
543 
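/* Illustrative sketch, not part of sm_sideeffect.c: rule E2 above is plain
 * exponential backoff capped at RTO.max, exactly as the code's
 * min(rto * 2, rto_max).  In isolation (hypothetical name):
 */
#include <stdint.h>

static uint32_t example_backoff_rto(uint32_t rto, uint32_t rto_max)
{
        uint32_t doubled = rto * 2;     /* RTO values are small; overflow ignored */

        /* RTO <- min(RTO * 2, RTO.max) */
        return doubled < rto_max ? doubled : rto_max;
}
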
544 /* Worker routine to handle INIT command failure.  */
545 static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
546                                  struct sctp_association *asoc,
547                                  unsigned int error)
548 {
549         struct sctp_ulpevent *event;
550 
551         event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
552                                                 (__u16)error, 0, 0, NULL,
553                                                 GFP_ATOMIC);
554 
555         if (event)
556                 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
557                                 SCTP_ULPEVENT(event));
558 
559         sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
560                         SCTP_STATE(SCTP_STATE_CLOSED));
561 
562         /* SEND_FAILED sent later when cleaning up the association. */
563         asoc->outqueue.error = error;
564         sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
565 }
566 
567 /* Worker routine to handle SCTP_CMD_ASSOC_FAILED.  */
568 static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
569                                   struct sctp_association *asoc,
570                                   sctp_event_t event_type,
571                                   sctp_subtype_t subtype,
572                                   struct sctp_chunk *chunk,
573                                   unsigned int error)
574 {
575         struct sctp_ulpevent *event;
576         struct sctp_chunk *abort;
577         /* Cancel any partial delivery in progress. */
578         sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
579 
580         if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
581                 event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
582                                                 (__u16)error, 0, 0, chunk,
583                                                 GFP_ATOMIC);
584         else
585                 event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
586                                                 (__u16)error, 0, 0, NULL,
587                                                 GFP_ATOMIC);
588         if (event)
589                 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
590                                 SCTP_ULPEVENT(event));
591 
592         if (asoc->overall_error_count >= asoc->max_retrans) {
593                 abort = sctp_make_violation_max_retrans(asoc, chunk);
594                 if (abort)
595                         sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
596                                         SCTP_CHUNK(abort));
597         }
598 
599         sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
600                         SCTP_STATE(SCTP_STATE_CLOSED));
601 
602         /* SEND_FAILED sent later when cleaning up the association. */
603         asoc->outqueue.error = error;
604         sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
605 }
606 
607 /* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
608  * inside the cookie.  In reality, this is only used for INIT-ACK processing
609  * since all other cases use "temporary" associations and can do all
610  * their work in statefuns directly.
611  */
612 static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
613                                  struct sctp_association *asoc,
614                                  struct sctp_chunk *chunk,
615                                  sctp_init_chunk_t *peer_init,
616                                  gfp_t gfp)
617 {
618         int error;
619 
620         /* We only process the init as a side effect in a single
621          * case.  This is when we process the INIT-ACK.  If we
622          * fail during INIT processing (due to malloc problems),
623          * just return the error and stop processing the stack.
624          */
625         if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
626                 error = -ENOMEM;
627         else
628                 error = 0;
629 
630         return error;
631 }
632 
633 /* Helper function to break out starting up of heartbeat timers.  */
634 static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
635                                      struct sctp_association *asoc)
636 {
637         struct sctp_transport *t;
638 
639         /* Start a heartbeat timer for each transport on the association.
640          * Hold a reference on the transport to make sure none of
641          * the needed data structures go away.
642          */
643         list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
644 
645                 if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
646                         sctp_transport_hold(t);
647         }
648 }
649 
650 static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
651                                     struct sctp_association *asoc)
652 {
653         struct sctp_transport *t;
654 
655         /* Stop all heartbeat timers. */
656 
657         list_for_each_entry(t, &asoc->peer.transport_addr_list,
658                         transports) {
659                 if (del_timer(&t->hb_timer))
660                         sctp_transport_put(t);
661         }
662 }
663 
664 /* Helper function to stop any pending T3-RTX timers */
665 static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds,
666                                         struct sctp_association *asoc)
667 {
668         struct sctp_transport *t;
669 
670         list_for_each_entry(t, &asoc->peer.transport_addr_list,
671                         transports) {
672                 if (del_timer(&t->T3_rtx_timer))
673                         sctp_transport_put(t);
674         }
675 }
676 
677 
678 /* Helper function to update the heartbeat timer. */
679 static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
680                                      struct sctp_transport *t)
681 {
682         /* Update the heartbeat timer.  */
683         if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
684                 sctp_transport_hold(t);
685 }
686 
687 /* Helper function to handle the reception of a HEARTBEAT ACK.  */
688 static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
689                                   struct sctp_association *asoc,
690                                   struct sctp_transport *t,
691                                   struct sctp_chunk *chunk)
692 {
693         sctp_sender_hb_info_t *hbinfo;
694         int was_unconfirmed = 0;
695 
696         /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
697          * HEARTBEAT should clear the error counter of the destination
698          * transport address to which the HEARTBEAT was sent.
699          */
700         t->error_count = 0;
701 
702         /*
703          * Although RFC4960 specifies that the overall error count must
704          * be cleared when a HEARTBEAT ACK is received, we make an
705          * exception while in SHUTDOWN PENDING. If the peer keeps its
706          * window shut forever, we may never be able to transmit our
707          * outstanding data and rely on the retransmission limit being
708          * reached to shut down the association.
709          */
710         if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
711                 t->asoc->overall_error_count = 0;
712 
713         /* Clear the hb_sent flag to signal that we had a good
714          * acknowledgement.
715          */
716         t->hb_sent = 0;
717 
718         /* Mark the destination transport address as active if it is not so
719          * marked.
720          */
721         if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
722                 was_unconfirmed = 1;
723                 sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
724                                              SCTP_HEARTBEAT_SUCCESS);
725         }
726 
727         if (t->state == SCTP_PF)
728                 sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
729                                              SCTP_HEARTBEAT_SUCCESS);
730 
731         /* HB-ACK was received for the proper HB.  Consider this
732          * forward progress.
733          */
734         if (t->dst)
735                 dst_confirm(t->dst);
736 
737         /* The receiver of the HEARTBEAT ACK should also perform an
738          * RTT measurement for that destination transport address
739          * using the time value carried in the HEARTBEAT ACK chunk.
740          * If the transport's rto_pending variable has been cleared,
741          * it was most likely due to a retransmit.  However, we want
742          * to re-enable it to properly update the rto.
743          */
744         if (t->rto_pending == 0)
745                 t->rto_pending = 1;
746 
747         hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
748         sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
749 
750         /* Update the heartbeat timer.  */
751         if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
752                 sctp_transport_hold(t);
753 
754         if (was_unconfirmed && asoc->peer.transport_count == 1)
755                 sctp_transport_immediate_rtx(t);
756 }
757 
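/* Illustrative sketch, not part of sm_sideeffect.c: the RTT sample taken above
 * (jiffies - hbinfo->sent_at) feeds sctp_transport_update_rto(), which follows
 * the smoothing rules of RFC 2960 section 6.3.1.  A self-contained
 * floating-point model of those rules (hypothetical names; RTO.Alpha = 1/8,
 * RTO.Beta = 1/4):
 */
static void example_update_rto(double *srtt, double *rttvar, double *rto,
                               double rtt, double rto_min, double rto_max)
{
        const double alpha = 0.125, beta = 0.25;

        if (*srtt == 0.0) {                     /* C2: first measurement */
                *srtt = rtt;
                *rttvar = rtt / 2.0;
        } else {                                /* C3: later measurements */
                double diff = *srtt > rtt ? *srtt - rtt : rtt - *srtt;

                *rttvar = (1.0 - beta) * *rttvar + beta * diff;
                *srtt = (1.0 - alpha) * *srtt + alpha * rtt;
        }

        *rto = *srtt + 4.0 * *rttvar;           /* RTO = SRTT + 4 * RTTVAR */
        if (*rto < rto_min)                     /* C6: round up to RTO.Min */
                *rto = rto_min;
        if (*rto > rto_max)                     /* C7: cap at RTO.Max */
                *rto = rto_max;
}
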
758 
759 /* Helper function to handle the SCTP_CMD_PROCESS_SACK command.  */
760 static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
761                                  struct sctp_association *asoc,
762                                  struct sctp_chunk *chunk)
763 {
764         int err = 0;
765 
766         if (sctp_outq_sack(&asoc->outqueue, chunk)) {
767                 struct net *net = sock_net(asoc->base.sk);
768 
769                 /* There are no more TSNs awaiting SACK.  */
770                 err = sctp_do_sm(net, SCTP_EVENT_T_OTHER,
771                                  SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
772                                  asoc->state, asoc->ep, asoc, NULL,
773                                  GFP_ATOMIC);
774         }
775 
776         return err;
777 }
778 
779 /* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
780  * the transport for a shutdown chunk.
781  */
782 static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds,
783                               struct sctp_association *asoc,
784                               struct sctp_chunk *chunk)
785 {
786         struct sctp_transport *t;
787 
788         if (chunk->transport)
789                 t = chunk->transport;
790         else {
791                 t = sctp_assoc_choose_alter_transport(asoc,
792                                               asoc->shutdown_last_sent_to);
793                 chunk->transport = t;
794         }
795         asoc->shutdown_last_sent_to = t;
796         asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
797 }
798 
799 /* Helper function to change the state of an association. */
800 static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
801                                struct sctp_association *asoc,
802                                sctp_state_t state)
803 {
804         struct sock *sk = asoc->base.sk;
805 
806         asoc->state = state;
807 
808         pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]);
809 
810         if (sctp_style(sk, TCP)) {
811                 /* Change the sk->sk_state of a TCP-style socket that has
812                  * successfully completed a connect() call.
813                  */
814                 if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
815                         sk->sk_state = SCTP_SS_ESTABLISHED;
816 
817                 /* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
818                 if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
819                     sctp_sstate(sk, ESTABLISHED))
820                         sk->sk_shutdown |= RCV_SHUTDOWN;
821         }
822 
823         if (sctp_state(asoc, COOKIE_WAIT)) {
824                 /* Reset init timeouts since they may have been
825                  * increased due to timer expirations.
826                  */
827                 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
828                                                 asoc->rto_initial;
829                 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
830                                                 asoc->rto_initial;
831         }
832 
833         if (sctp_state(asoc, ESTABLISHED) ||
834             sctp_state(asoc, CLOSED) ||
835             sctp_state(asoc, SHUTDOWN_RECEIVED)) {
836                 /* Wake up any processes waiting in the asoc's wait queue in
837                  * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
838                  */
839                 if (waitqueue_active(&asoc->wait))
840                         wake_up_interruptible(&asoc->wait);
841 
842                 /* Wake up any processes waiting in the sk's sleep queue of
843                  * a TCP-style or UDP-style peeled-off socket in
844                  * sctp_wait_for_accept() or sctp_wait_for_packet().
845                  * For a UDP-style socket, the waiters are woken up by the
846                  * notifications.
847                  */
848                 if (!sctp_style(sk, UDP))
849                         sk->sk_state_change(sk);
850         }
851 }
852 
853 /* Helper function to delete an association. */
854 static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds,
855                                 struct sctp_association *asoc)
856 {
857         struct sock *sk = asoc->base.sk;
858 
859         /* If it is a non-temporary association belonging to a TCP-style
860          * listening socket that is not closed, do not free it so that accept()
861          * can pick it up later.
862          */
863         if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
864             (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
865                 return;
866 
867         sctp_unhash_established(asoc);
868         sctp_association_free(asoc);
869 }
870 
871 /*
872  * ADDIP Section 4.1 ASCONF Chunk Procedures
873  * A4) Start a T-4 RTO timer, using the RTO value of the selected
874  * destination address (we use the active path instead of the primary path
875  * just because the primary path may be inactive).
876  */
877 static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds,
878                                 struct sctp_association *asoc,
879                                 struct sctp_chunk *chunk)
880 {
881         struct sctp_transport *t;
882 
883         t = sctp_assoc_choose_alter_transport(asoc, chunk->transport);
884         asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
885         chunk->transport = t;
886 }
887 
888 /* Process an incoming Operation Error Chunk. */
889 static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
890                                    struct sctp_association *asoc,
891                                    struct sctp_chunk *chunk)
892 {
893         struct sctp_errhdr *err_hdr;
894         struct sctp_ulpevent *ev;
895 
896         while (chunk->chunk_end > chunk->skb->data) {
897                 err_hdr = (struct sctp_errhdr *)(chunk->skb->data);
898 
899                 ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
900                                                      GFP_ATOMIC);
901                 if (!ev)
902                         return;
903 
904                 sctp_ulpq_tail_event(&asoc->ulpq, ev);
905 
906                 switch (err_hdr->cause) {
907                 case SCTP_ERROR_UNKNOWN_CHUNK:
908                 {
909                         sctp_chunkhdr_t *unk_chunk_hdr;
910 
911                         unk_chunk_hdr = (sctp_chunkhdr_t *)err_hdr->variable;
912                         switch (unk_chunk_hdr->type) {
913                         /* ADDIP 4.1 A9) If the peer responds to an ASCONF with
914                          * an ERROR chunk reporting that it did not recognize
915                          * the ASCONF chunk type, the sender of the ASCONF MUST
916                          * NOT send any further ASCONF chunks and MUST stop its
917                          * T-4 timer.
918                          */
919                         case SCTP_CID_ASCONF:
920                                 if (asoc->peer.asconf_capable == 0)
921                                         break;
922 
923                                 asoc->peer.asconf_capable = 0;
924                                 sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
925                                         SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
926                                 break;
927                         default:
928                                 break;
929                         }
930                         break;
931                 }
932                 default:
933                         break;
934                 }
935         }
936 }
937 
938 /* Process variable FWDTSN chunk information. */
939 static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
940                                     struct sctp_chunk *chunk)
941 {
942         struct sctp_fwdtsn_skip *skip;
943         /* Walk through all the skipped SSNs */
944         sctp_walk_fwdtsn(skip, chunk) {
945                 sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
946         }
947 }
948 
949 /* Helper function to remove the association's non-primary peer
950  * transports.
951  */
952 static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
953 {
954         struct sctp_transport *t;
955         struct list_head *pos;
956         struct list_head *temp;
957 
958         list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
959                 t = list_entry(pos, struct sctp_transport, transports);
960                 if (!sctp_cmp_addr_exact(&t->ipaddr,
961                                          &asoc->peer.primary_addr)) {
962                         sctp_assoc_del_peer(asoc, &t->ipaddr);
963                 }
964         }
965 }
966 
967 /* Helper function to set sk_err on a 1-1 style socket. */
968 static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
969 {
970         struct sock *sk = asoc->base.sk;
971 
972         if (!sctp_style(sk, UDP))
973                 sk->sk_err = error;
974 }
975 
976 /* Helper function to generate an association change event */
977 static void sctp_cmd_assoc_change(sctp_cmd_seq_t *commands,
978                                  struct sctp_association *asoc,
979                                  u8 state)
980 {
981         struct sctp_ulpevent *ev;
982 
983         ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
984                                             asoc->c.sinit_num_ostreams,
985                                             asoc->c.sinit_max_instreams,
986                                             NULL, GFP_ATOMIC);
987         if (ev)
988                 sctp_ulpq_tail_event(&asoc->ulpq, ev);
989 }
990 
991 /* Helper function to generate an adaptation indication event */
992 static void sctp_cmd_adaptation_ind(sctp_cmd_seq_t *commands,
993                                     struct sctp_association *asoc)
994 {
995         struct sctp_ulpevent *ev;
996 
997         ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
998 
999         if (ev)
1000                 sctp_ulpq_tail_event(&asoc->ulpq, ev);
1001 }
1002 
1003 
1004 static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
1005                                     sctp_event_timeout_t timer,
1006                                     char *name)
1007 {
1008         struct sctp_transport *t;
1009 
1010         t = asoc->init_last_sent_to;
1011         asoc->init_err_counter++;
1012 
1013         if (t->init_sent_count > (asoc->init_cycle + 1)) {
1014                 asoc->timeouts[timer] *= 2;
1015                 if (asoc->timeouts[timer] > asoc->max_init_timeo) {
1016                         asoc->timeouts[timer] = asoc->max_init_timeo;
1017                 }
1018                 asoc->init_cycle++;
1019 
1020                 pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d"
1021                          " cycle:%d timeout:%ld\n", __func__, name,
1022                          asoc->init_err_counter, asoc->init_cycle,
1023                          asoc->timeouts[timer]);
1024         }
1025 
1026 }
1027 
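/* Illustrative sketch, not part of sm_sideeffect.c: the T1 adjustment above
 * doubles the init/cookie timeout once per completed init cycle and clamps it
 * at max_init_timeo.  The timeout after N cycles, in a self-contained form
 * (hypothetical name):
 */
static unsigned long example_t1_timeout_after(unsigned long initial,
                                              unsigned long max_init_timeo,
                                              unsigned int cycles)
{
        unsigned long timeout = initial;

        while (cycles--) {
                timeout *= 2;
                if (timeout > max_init_timeo)
                        return max_init_timeo;
        }

        return timeout;
}
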
1028 /* Send the whole message, chunk by chunk, to the outqueue.
1029  * This way the whole message is queued up and bundling is
1030  * encouraged for small fragments.
1031  */
1032 static int sctp_cmd_send_msg(struct sctp_association *asoc,
1033                                 struct sctp_datamsg *msg)
1034 {
1035         struct sctp_chunk *chunk;
1036         int error = 0;
1037 
1038         list_for_each_entry(chunk, &msg->chunks, frag_list) {
1039                 error = sctp_outq_tail(&asoc->outqueue, chunk);
1040                 if (error)
1041                         break;
1042         }
1043 
1044         return error;
1045 }
1046 
1047 
1048 /* Send the next ASCONF packet currently stored in the association.
1049  * This happens after the ASCONF_ACK was successfully processed.
1050  */
1051 static void sctp_cmd_send_asconf(struct sctp_association *asoc)
1052 {
1053         struct net *net = sock_net(asoc->base.sk);
1054 
1055         /* Send the next asconf chunk from the addip chunk
1056          * queue.
1057          */
1058         if (!list_empty(&asoc->addip_chunk_list)) {
1059                 struct list_head *entry = asoc->addip_chunk_list.next;
1060                 struct sctp_chunk *asconf = list_entry(entry,
1061                                                 struct sctp_chunk, list);
1062                 list_del_init(entry);
1063 
1064                 /* Hold the chunk until an ASCONF_ACK is received. */
1065                 sctp_chunk_hold(asconf);
1066                 if (sctp_primitive_ASCONF(net, asoc, asconf))
1067                         sctp_chunk_free(asconf);
1068                 else
1069                         asoc->addip_last_asconf = asconf;
1070         }
1071 }
1072 
1073 
1074 /* These three macros allow us to pull the debugging code out of the
1075  * main flow of sctp_do_sm() to keep attention focused on the real
1076  * functionality there.
1077  */
1078 #define debug_pre_sfn() \
1079         pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \
1080                  ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype),   \
1081                  asoc, sctp_state_tbl[state], state_fn->name)
1082 
1083 #define debug_post_sfn() \
1084         pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \
1085                  sctp_status_tbl[status])
1086 
1087 #define debug_post_sfx() \
1088         pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \
1089                  asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
1090                  sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED])
1091 
1092 /*
1093  * This is the master state machine processing function.
1094  *
1095  * If you want to understand all of lksctp, this is a
1096  * good place to start.
1097  */
1098 int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
1099                sctp_state_t state,
1100                struct sctp_endpoint *ep,
1101                struct sctp_association *asoc,
1102                void *event_arg,
1103                gfp_t gfp)
1104 {
1105         sctp_cmd_seq_t commands;
1106         const sctp_sm_table_entry_t *state_fn;
1107         sctp_disposition_t status;
1108         int error = 0;
1109         typedef const char *(printfn_t)(sctp_subtype_t);
1110         static printfn_t *table[] = {
1111                 NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
1112         };
1113         printfn_t *debug_fn  __attribute__ ((unused)) = table[event_type];
1114 
1115         /* Look up the state function, run it, and then process the
1116          * side effects.  These three steps are the heart of lksctp.
1117          */
1118         state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);
1119 
1120         sctp_init_cmd_seq(&commands);
1121 
1122         debug_pre_sfn();
1123         status = (*state_fn->fn)(net, ep, asoc, subtype, event_arg, &commands);
1124         debug_post_sfn();
1125 
1126         error = sctp_side_effects(event_type, subtype, state,
1127                                   ep, asoc, event_arg, status,
1128                                   &commands, gfp);
1129         debug_post_sfx();
1130 
1131         return error;
1132 }
1133 
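/* Illustrative sketch, not part of sm_sideeffect.c: sctp_do_sm() is a
 * table-driven state machine -- look up a handler keyed by (event type, state),
 * run it to obtain a disposition plus a command sequence, then let the side
 * effect interpreter walk the commands.  The shape of that dispatch, in a
 * self-contained form (all names hypothetical):
 */
struct example_machine;

typedef int (example_state_fn)(struct example_machine *m, void *event_arg);

static int example_run_state_machine(example_state_fn *(*lookup)(int event, int state),
                                     int event, int state,
                                     struct example_machine *m, void *event_arg)
{
        example_state_fn *fn = lookup(event, state);

        /* Run the state function; the caller then applies its side effects. */
        return fn ? fn(m, event_arg) : -1;
}
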
1134 /*****************************************************************
1135  * This is the master state machine side effect processing function.
1136  *****************************************************************/
1137 static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
1138                              sctp_state_t state,
1139                              struct sctp_endpoint *ep,
1140                              struct sctp_association *asoc,
1141                              void *event_arg,
1142                              sctp_disposition_t status,
1143                              sctp_cmd_seq_t *commands,
1144                              gfp_t gfp)
1145 {
1146         int error;
1147 
1148         /* FIXME - Most of the dispositions left today would be categorized
1149          * as "exceptional" dispositions.  For those dispositions, it
1150          * may not be proper to run through any of the commands at all.
1151          * For example, the command interpreter might be run only with
1152          * disposition SCTP_DISPOSITION_CONSUME.
1153          */
1154         if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
1155                                                ep, asoc,
1156                                                event_arg, status,
1157                                                commands, gfp)))
1158                 goto bail;
1159 
1160         switch (status) {
1161         case SCTP_DISPOSITION_DISCARD:
1162                 pr_debug("%s: ignored sctp protocol event - state:%d, "
1163                          "event_type:%d, event_id:%d\n", __func__, state,
1164                          event_type, subtype.chunk);
1165                 break;
1166 
1167         case SCTP_DISPOSITION_NOMEM:
1168                 /* We ran out of memory, so we need to discard this
1169                  * packet.
1170                  */
1171                 /* BUG--we should now recover some memory, probably by
1172                  * reneging...
1173                  */
1174                 error = -ENOMEM;
1175                 break;
1176 
1177         case SCTP_DISPOSITION_DELETE_TCB:
1178                 /* This should now be a command. */
1179                 break;
1180 
1181         case SCTP_DISPOSITION_CONSUME:
1182         case SCTP_DISPOSITION_ABORT:
1183                 /*
1184                  * We should no longer have much work to do here as the
1185                  * real work has been done as explicit commands above.
1186                  */
1187                 break;
1188 
1189         case SCTP_DISPOSITION_VIOLATION:
1190                 net_err_ratelimited("protocol violation state %d chunkid %d\n",
1191                                     state, subtype.chunk);
1192                 break;
1193 
1194         case SCTP_DISPOSITION_NOT_IMPL:
1195                 pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
1196                         state, event_type, subtype.chunk);
1197                 break;
1198 
1199         case SCTP_DISPOSITION_BUG:
1200                 pr_err("bug in state %d, event_type %d, event_id %d\n",
1201                        state, event_type, subtype.chunk);
1202                 BUG();
1203                 break;
1204 
1205         default:
1206                 pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
1207                        status, state, event_type, subtype.chunk);
1208                 BUG();
1209                 break;
1210         }
1211 
1212 bail:
1213         return error;
1214 }
1215 
1216 /********************************************************************
1217  * 2nd Level Abstractions
1218  ********************************************************************/
1219 
1220 /* This is the side-effect interpreter.  */
1221 static int sctp_cmd_interpreter(sctp_event_t event_type,
1222                                 sctp_subtype_t subtype,
1223                                 sctp_state_t state,
1224                                 struct sctp_endpoint *ep,
1225                                 struct sctp_association *asoc,
1226                                 void *event_arg,
1227                                 sctp_disposition_t status,
1228                                 sctp_cmd_seq_t *commands,
1229                                 gfp_t gfp)
1230 {
1231         int error = 0;
1232         int force;
1233         sctp_cmd_t *cmd;
1234         struct sctp_chunk *new_obj;
1235         struct sctp_chunk *chunk = NULL;
1236         struct sctp_packet *packet;
1237         struct timer_list *timer;
1238         unsigned long timeout;
1239         struct sctp_transport *t;
1240         struct sctp_sackhdr sackh;
1241         int local_cork = 0;
1242 
1243         if (SCTP_EVENT_T_TIMEOUT != event_type)
1244                 chunk = event_arg;
1245 
1246         /* Note:  This whole file is a huge candidate for rework.
1247          * For example, each command could either have its own handler, so
1248          * the loop would look like:
1249          *     while (cmds)
1250          *         cmd->handle(x, y, z)
1251          * --jgrimm
1252          */
1253         while (NULL != (cmd = sctp_next_cmd(commands))) {
1254                 switch (cmd->verb) {
1255                 case SCTP_CMD_NOP:
1256                         /* Do nothing. */
1257                         break;
1258 
1259                 case SCTP_CMD_NEW_ASOC:
1260                         /* Register a new association.  */
1261                         if (local_cork) {
1262                                 sctp_outq_uncork(&asoc->outqueue);
1263                                 local_cork = 0;
1264                         }
1265 
1266                         /* Register with the endpoint.  */
1267                         asoc = cmd->obj.asoc;
1268                         BUG_ON(asoc->peer.primary_path == NULL);
1269                         sctp_endpoint_add_asoc(ep, asoc);
1270                         sctp_hash_established(asoc);
1271                         break;
1272 
1273                 case SCTP_CMD_UPDATE_ASSOC:
1274                         sctp_assoc_update(asoc, cmd->obj.asoc);
1275                         break;
1276 
1277                 case SCTP_CMD_PURGE_OUTQUEUE:
1278                         sctp_outq_teardown(&asoc->outqueue);
1279                         break;
1280 
1281                 case SCTP_CMD_DELETE_TCB:
1282                         if (local_cork) {
1283                                 sctp_outq_uncork(&asoc->outqueue);
1284                                 local_cork = 0;
1285                         }
1286                         /* Delete the current association.  */
1287                         sctp_cmd_delete_tcb(commands, asoc);
1288                         asoc = NULL;
1289                         break;
1290 
1291                 case SCTP_CMD_NEW_STATE:
1292                         /* Enter a new state.  */
1293                         sctp_cmd_new_state(commands, asoc, cmd->obj.state);
1294                         break;
1295 
1296                 case SCTP_CMD_REPORT_TSN:
1297                         /* Record the arrival of a TSN.  */
1298                         error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
1299                                                  cmd->obj.u32, NULL);
1300                         break;
1301 
1302                 case SCTP_CMD_REPORT_FWDTSN:
1303                         /* Move the Cumulative TSN Ack ahead. */
1304                         sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32);
1305 
1306                         /* purge the fragmentation queue */
1307                         sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32);
1308 
1309                         /* Abort any in progress partial delivery. */
1310                         sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
1311                         break;
1312 
1313                 case SCTP_CMD_PROCESS_FWDTSN:
1314                         sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.chunk);
1315                         break;
1316 
1317                 case SCTP_CMD_GEN_SACK:
1318                         /* Generate a Selective ACK.
1319                          * The argument tells us whether to just count
1320                          * the packet and MAYBE generate a SACK, or
1321                          * force a SACK out.
1322                          */
1323                         force = cmd->obj.i32;
1324                         error = sctp_gen_sack(asoc, force, commands);
1325                         break;
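                             /* [Editorial sketch] A state function typically queues this
                              * command with the force flag wrapped as an i32, e.g.
                              * (macro names taken from command.h, cited from memory):
                              *
                              *     sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
                              *     sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
                              */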
1326 
1327                 case SCTP_CMD_PROCESS_SACK:
1328                         /* Process an inbound SACK.  */
1329                         error = sctp_cmd_process_sack(commands, asoc,
1330                                                       cmd->obj.chunk);
1331                         break;
1332 
1333                 case SCTP_CMD_GEN_INIT_ACK:
1334                         /* Generate an INIT ACK chunk.  */
1335                         new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
1336                                                      0);
1337                         if (!new_obj)
1338                                 goto nomem;
1339 
1340                         sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1341                                         SCTP_CHUNK(new_obj));
1342                         break;
1343 
1344                 case SCTP_CMD_PEER_INIT:
1345                         /* Process a unified INIT from the peer.
1346                          * Note: Only used during INIT-ACK processing.  If
1347                          * there is an error, just return to the outer
1348                          * layer, which will bail.
1349                          */
1350                         error = sctp_cmd_process_init(commands, asoc, chunk,
1351                                                       cmd->obj.init, gfp);
1352                         break;
1353 
1354                 case SCTP_CMD_GEN_COOKIE_ECHO:
1355                         /* Generate a COOKIE ECHO chunk.  */
1356                         new_obj = sctp_make_cookie_echo(asoc, chunk);
1357                         if (!new_obj) {
1358                                 if (cmd->obj.chunk)
1359                                         sctp_chunk_free(cmd->obj.chunk);
1360                                 goto nomem;
1361                         }
1362                         sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1363                                         SCTP_CHUNK(new_obj));
1364 
1365                         /* If there is an ERROR chunk to be sent along with
1366                          * the COOKIE_ECHO, send it, too.
1367                          */
1368                         if (cmd->obj.chunk)
1369                                 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1370                                                 SCTP_CHUNK(cmd->obj.chunk));
1371 
1372                         if (new_obj->transport) {
1373                                 new_obj->transport->init_sent_count++;
1374                                 asoc->init_last_sent_to = new_obj->transport;
1375                         }
1376 
1377                         /* FIXME - Eventually come up with a cleaner way of
1378                          * enabling COOKIE-ECHO + DATA bundling during
1379                          * multihoming stale cookie scenarios.  The following
1380                          * command plays with asoc->peer.retran_path to
1381                          * avoid the problem of sending the COOKIE-ECHO and
1382                          * DATA in different paths, which could result
1383                          * in the association being ABORTed if the DATA chunk
1384                          * is processed first by the server.  Checking the
1385                          * init error counter simply causes this command
1386                          * to be executed only during failed attempts of
1387                          * association establishment.
1388                          */
1389                         if ((asoc->peer.retran_path !=
1390                              asoc->peer.primary_path) &&
1391                             (asoc->init_err_counter > 0)) {
1392                                 sctp_add_cmd_sf(commands,
1393                                                 SCTP_CMD_FORCE_PRIM_RETRAN,
1394                                                 SCTP_NULL());
1395                         }
1396 
1397                         break;
1398 
1399                 case SCTP_CMD_GEN_SHUTDOWN:
1400                         /* Generate SHUTDOWN when in SHUTDOWN_SENT state.
1401                          * Reset error counts.
1402                          */
1403                         asoc->overall_error_count = 0;
1404 
1405                         /* Generate a SHUTDOWN chunk.  */
1406                         new_obj = sctp_make_shutdown(asoc, chunk);
1407                         if (!new_obj)
1408                                 goto nomem;
1409                         sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1410                                         SCTP_CHUNK(new_obj));
1411                         break;
1412 
1413                 case SCTP_CMD_CHUNK_ULP:
1414                         /* Send a chunk to the sockets layer.  */
1415                         pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
1416                                  __func__, cmd->obj.chunk, &asoc->ulpq);
1417 
1418                         sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk,
1419                                             GFP_ATOMIC);
1420                         break;
1421 
1422                 case SCTP_CMD_EVENT_ULP:
1423                         /* Send a notification to the sockets layer.  */
1424                         pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
1425                                  __func__, cmd->obj.ulpevent, &asoc->ulpq);
1426 
1427                         sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent);
1428                         break;
1429 
1430                 case SCTP_CMD_REPLY:
1431                         /* If the caller has not already corked, do cork. */
1432                         if (!asoc->outqueue.cork) {
1433                                 sctp_outq_cork(&asoc->outqueue);
1434                                 local_cork = 1;
1435                         }
1436                         /* Send a chunk to our peer.  */
1437                         error = sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk);
1438                         break;
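                             /* [Editorial sketch] The cork/uncork pattern above lets several
                              * queued replies leave in one bundled packet, roughly:
                              *
                              *     sctp_outq_cork(&asoc->outqueue);      transmission held back
                              *     sctp_outq_tail(&asoc->outqueue, c1);  queue reply chunks
                              *     sctp_outq_tail(&asoc->outqueue, c2);
                              *     sctp_outq_uncork(&asoc->outqueue);    flush, bundling c1+c2
                              *
                              * (c1/c2 are placeholder chunk pointers.)  local_cork records
                              * that this function, not its caller, corked the queue, so only
                              * then is the uncork done at the "out:" label below.
                              */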
1439 
1440                 case SCTP_CMD_SEND_PKT:
1441                         /* Send a full packet to our peer.  */
1442                         packet = cmd->obj.packet;
1443                         sctp_packet_transmit(packet);
1444                         sctp_ootb_pkt_free(packet);
1445                         break;
1446 
1447                 case SCTP_CMD_T1_RETRAN:
1448                         /* Mark a transport for retransmission.  */
1449                         sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
1450                                         SCTP_RTXR_T1_RTX);
1451                         break;
1452 
1453                 case SCTP_CMD_RETRAN:
1454                         /* Mark a transport for retransmission.  */
1455                         sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
1456                                         SCTP_RTXR_T3_RTX);
1457                         break;
1458 
1459                 case SCTP_CMD_ECN_CE:
1460                         /* Do delayed CE processing.   */
1461                         sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
1462                         break;
1463 
1464                 case SCTP_CMD_ECN_ECNE:
1465                         /* Do delayed ECNE processing. */
1466                         new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
1467                                                         chunk);
1468                         if (new_obj)
1469                                 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1470                                                 SCTP_CHUNK(new_obj));
1471                         break;
1472 
1473                 case SCTP_CMD_ECN_CWR:
1474                         /* Do delayed CWR processing.  */
1475                         sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
1476                         break;
1477 
1478                 case SCTP_CMD_SETUP_T2:
1479                         sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk);
1480                         break;
1481 
1482                 case SCTP_CMD_TIMER_START_ONCE:
1483                         timer = &asoc->timers[cmd->obj.to];
1484 
1485                         if (timer_pending(timer))
1486                                 break;
1487                         /* fall through */
1488 
1489                 case SCTP_CMD_TIMER_START:
1490                         timer = &asoc->timers[cmd->obj.to];
1491                         timeout = asoc->timeouts[cmd->obj.to];
1492                         BUG_ON(!timeout);
1493 
1494                         timer->expires = jiffies + timeout;
1495                         sctp_association_hold(asoc);
1496                         add_timer(timer);
1497                         break;
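                             /* [Editorial sketch] State functions request a timer with the
                              * SCTP_TO() wrapper, e.g.:
                              *
                              *     sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
                              *                     SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
                              *
                              * Note that starting a timer takes a reference on the
                              * association (sctp_association_hold) which the matching
                              * SCTP_CMD_TIMER_STOP drops via sctp_association_put once
                              * del_timer() succeeds.
                              */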
1498 
1499                 case SCTP_CMD_TIMER_RESTART:
1500                         timer = &asoc->timers[cmd->obj.to];
1501                         timeout = asoc->timeouts[cmd->obj.to];
1502                         if (!mod_timer(timer, jiffies + timeout))
1503                                 sctp_association_hold(asoc);
1504                         break;
1505 
1506                 case SCTP_CMD_TIMER_STOP:
1507                         timer = &asoc->timers[cmd->obj.to];
1508                         if (del_timer(timer))
1509                                 sctp_association_put(asoc);
1510                         break;
1511 
1512                 case SCTP_CMD_INIT_CHOOSE_TRANSPORT:
1513                         chunk = cmd->obj.chunk;
1514                         t = sctp_assoc_choose_alter_transport(asoc,
1515                                                 asoc->init_last_sent_to);
1516                         asoc->init_last_sent_to = t;
1517                         chunk->transport = t;
1518                         t->init_sent_count++;
1519                         /* Set the new transport as primary */
1520                         sctp_assoc_set_primary(asoc, t);
1521                         break;
1522 
1523                 case SCTP_CMD_INIT_RESTART:
1524                         /* Do the needed accounting and updates
1525                          * associated with restarting an initialization
1526                          * timer. Only multiply the timeout by two if
1527                          * all transports have been tried at the current
1528                          * timeout.
1529                          */
1530                         sctp_cmd_t1_timer_update(asoc,
1531                                                 SCTP_EVENT_TIMEOUT_T1_INIT,
1532                                                 "INIT");
1533 
1534                         sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
1535                                         SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
1536                         break;
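                             /* [Editorial sketch] The doubling rule described in the comment
                              * above amounts to roughly the following; this is only a guess
                              * at what sctp_cmd_t1_timer_update() (defined earlier in this
                              * file) does, with "tried_all_transports" and max_init_timeo
                              * used as stand-in names:
                              *
                              *     if (tried_all_transports) {
                              *             asoc->timeouts[timer] *= 2;
                              *             if (asoc->timeouts[timer] > asoc->max_init_timeo)
                              *                     asoc->timeouts[timer] = asoc->max_init_timeo;
                              *     }
                              */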
1537 
1538                 case SCTP_CMD_COOKIEECHO_RESTART:
1539                         /* Do the needed accounting and updates
1540                          * associated with restarting an initialization
1541                          * timer. Only multiply the timeout by two if
1542                          * all transports have been tried at the current
1543                          * timeout.
1544                          */
1545                         sctp_cmd_t1_timer_update(asoc,
1546                                                 SCTP_EVENT_TIMEOUT_T1_COOKIE,
1547                                                 "COOKIE");
1548 
1549                         /* If we've sent any data bundled with
1550                          * COOKIE-ECHO we need to resend.
1551                          */
1552                         list_for_each_entry(t, &asoc->peer.transport_addr_list,
1553                                         transports) {
1554                                 sctp_retransmit_mark(&asoc->outqueue, t,
1555                                             SCTP_RTXR_T1_RTX);
1556                         }
1557 
1558                         sctp_add_cmd_sf(commands,
1559                                         SCTP_CMD_TIMER_RESTART,
1560                                         SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
1561                         break;
1562 
1563                 case SCTP_CMD_INIT_FAILED:
1564                         sctp_cmd_init_failed(commands, asoc, cmd->obj.err);
1565                         break;
1566 
1567                 case SCTP_CMD_ASSOC_FAILED:
1568                         sctp_cmd_assoc_failed(commands, asoc, event_type,
1569                                               subtype, chunk, cmd->obj.err);
1570                         break;
1571 
1572                 case SCTP_CMD_INIT_COUNTER_INC:
1573                         asoc->init_err_counter++;
1574                         break;
1575 
1576                 case SCTP_CMD_INIT_COUNTER_RESET:
1577                         asoc->init_err_counter = 0;
1578                         asoc->init_cycle = 0;
1579                         list_for_each_entry(t, &asoc->peer.transport_addr_list,
1580                                             transports) {
1581                                 t->init_sent_count = 0;
1582                         }
1583                         break;
1584 
1585                 case SCTP_CMD_REPORT_DUP:
1586                         sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
1587                                              cmd->obj.u32);
1588                         break;
1589 
1590                 case SCTP_CMD_REPORT_BAD_TAG:
1591                         pr_debug("%s: vtag mismatch!\n", __func__);
1592                         break;
1593 
1594                 case SCTP_CMD_STRIKE:
1595                         /* Mark one strike against a transport.  */
1596                         sctp_do_8_2_transport_strike(commands, asoc,
1597                                                     cmd->obj.transport, 0);
1598                         break;
1599 
1600                 case SCTP_CMD_TRANSPORT_IDLE:
1601                         t = cmd->obj.transport;
1602                         sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
1603                         break;
1604 
1605                 case SCTP_CMD_TRANSPORT_HB_SENT:
1606                         t = cmd->obj.transport;
1607                         sctp_do_8_2_transport_strike(commands, asoc,
1608                                                      t, 1);
1609                         t->hb_sent = 1;
1610                         break;
1611 
1612                 case SCTP_CMD_TRANSPORT_ON:
1613                         t = cmd->obj.transport;
1614                         sctp_cmd_transport_on(commands, asoc, t, chunk);
1615                         break;
1616 
1617                 case SCTP_CMD_HB_TIMERS_START:
1618                         sctp_cmd_hb_timers_start(commands, asoc);
1619                         break;
1620 
1621                 case SCTP_CMD_HB_TIMER_UPDATE:
1622                         t = cmd->obj.transport;
1623                         sctp_cmd_hb_timer_update(commands, t);
1624                         break;
1625 
1626                 case SCTP_CMD_HB_TIMERS_STOP:
1627                         sctp_cmd_hb_timers_stop(commands, asoc);
1628                         break;
1629 
1630                 case SCTP_CMD_REPORT_ERROR:
1631                         error = cmd->obj.error;
1632                         break;
1633 
1634                 case SCTP_CMD_PROCESS_CTSN:
1635                         /* Dummy up a SACK for processing. */
1636                         sackh.cum_tsn_ack = cmd->obj.be32;
1637                         sackh.a_rwnd = asoc->peer.rwnd +
1638                                         asoc->outqueue.outstanding_bytes;
1639                         sackh.num_gap_ack_blocks = 0;
1640                         sackh.num_dup_tsns = 0;
1641                         chunk->subh.sack_hdr = &sackh;
1642                         sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
1643                                         SCTP_CHUNK(chunk));
1644                         break;
1645 
1646                 case SCTP_CMD_DISCARD_PACKET:
1647                         /* We need to discard the whole packet.
1648                          * Uncork the queue since there might be
1649                          * responses pending.
1650                          */
1651                         chunk->pdiscard = 1;
1652                         if (asoc) {
1653                                 sctp_outq_uncork(&asoc->outqueue);
1654                                 local_cork = 0;
1655                         }
1656                         break;
1657 
1658                 case SCTP_CMD_RTO_PENDING:
1659                         t = cmd->obj.transport;
1660                         t->rto_pending = 1;
1661                         break;
1662 
1663                 case SCTP_CMD_PART_DELIVER:
1664                         sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC);
1665                         break;
1666 
1667                 case SCTP_CMD_RENEGE:
1668                         sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk,
1669                                          GFP_ATOMIC);
1670                         break;
1671 
1672                 case SCTP_CMD_SETUP_T4:
1673                         sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk);
1674                         break;
1675 
1676                 case SCTP_CMD_PROCESS_OPERR:
1677                         sctp_cmd_process_operr(commands, asoc, chunk);
1678                         break;
1679                 case SCTP_CMD_CLEAR_INIT_TAG:
1680                         asoc->peer.i.init_tag = 0;
1681                         break;
1682                 case SCTP_CMD_DEL_NON_PRIMARY:
1683                         sctp_cmd_del_non_primary(asoc);
1684                         break;
1685                 case SCTP_CMD_T3_RTX_TIMERS_STOP:
1686                         sctp_cmd_t3_rtx_timers_stop(commands, asoc);
1687                         break;
1688                 case SCTP_CMD_FORCE_PRIM_RETRAN:
1689                         t = asoc->peer.retran_path;
1690                         asoc->peer.retran_path = asoc->peer.primary_path;
1691                         error = sctp_outq_uncork(&asoc->outqueue);
1692                         local_cork = 0;
1693                         asoc->peer.retran_path = t;
1694                         break;
1695                 case SCTP_CMD_SET_SK_ERR:
1696                         sctp_cmd_set_sk_err(asoc, cmd->obj.error);
1697                         break;
1698                 case SCTP_CMD_ASSOC_CHANGE:
1699                         sctp_cmd_assoc_change(commands, asoc,
1700                                               cmd->obj.u8);
1701                         break;
1702                 case SCTP_CMD_ADAPTATION_IND:
1703                         sctp_cmd_adaptation_ind(commands, asoc);
1704                         break;
1705 
1706                 case SCTP_CMD_ASSOC_SHKEY:
1707                         error = sctp_auth_asoc_init_active_key(asoc,
1708                                                 GFP_ATOMIC);
1709                         break;
1710                 case SCTP_CMD_UPDATE_INITTAG:
1711                         asoc->peer.i.init_tag = cmd->obj.u32;
1712                         break;
1713                 case SCTP_CMD_SEND_MSG:
1714                         if (!asoc->outqueue.cork) {
1715                                 sctp_outq_cork(&asoc->outqueue);
1716                                 local_cork = 1;
1717                         }
1718                         error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
1719                         break;
1720                 case SCTP_CMD_SEND_NEXT_ASCONF:
1721                         sctp_cmd_send_asconf(asoc);
1722                         break;
1723                 case SCTP_CMD_PURGE_ASCONF_QUEUE:
1724                         sctp_asconf_queue_teardown(asoc);
1725                         break;
1726 
1727                 case SCTP_CMD_SET_ASOC:
1728                         asoc = cmd->obj.asoc;
1729                         break;
1730 
1731                 default:
1732                         pr_warn("Impossible command: %u\n",
1733                                 cmd->verb);
1734                         break;
1735                 }
1736 
1737                 if (error)
1738                         break;
1739         }
1740 
1741 out:
1742         /* If this is in response to a received chunk, wait until
1743          * we are done with the packet to open the queue so that we don't
1744          * send multiple packets in response to a single request.
1745          */
1746         if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
1747                 if (chunk->end_of_packet || chunk->singleton)
1748                         error = sctp_outq_uncork(&asoc->outqueue);
1749         } else if (local_cork)
1750                 error = sctp_outq_uncork(&asoc->outqueue);
1751         return error;
1752 nomem:
1753         error = -ENOMEM;
1754         goto out;
1755 }
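     /* [Editorial sketch] The command list consumed above is produced by the
      * state functions in sm_statefuns.c, which queue side effects with
      * sctp_add_cmd_sf() and the typed wrappers seen throughout this switch,
      * for example (repl is a placeholder chunk pointer; SCTP_STATE() and
      * SCTP_STATE_ESTABLISHED are the usual command.h/constants.h names,
      * cited from memory):
      *
      *     sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
      *     sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
      *                     SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
      *     sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
      *                     SCTP_STATE(SCTP_STATE_ESTABLISHED));
      *
      * sctp_do_sm() then hands the filled command sequence to
      * sctp_cmd_interpreter(), where the effects actually happen.
      */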
1756 
1757 
