linux/net/sctp/transport.c
/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP transport representing
 * a remote transport address.  For local transport addresses, we just use
 * union sctp_addr.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/types.h>
#include <linux/random.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* 1st Level Abstractions.  */

/* Initialize a new transport from provided memory.  */
static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
                                                  const union sctp_addr *addr,
                                                  gfp_t gfp)
{
        /* Copy in the address.  */
        peer->ipaddr = *addr;
        peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
        peer->asoc = NULL;

        peer->dst = NULL;
        memset(&peer->saddr, 0, sizeof(union sctp_addr));

        /* From 6.3.1 RTO Calculation:
         *
         * C1) Until an RTT measurement has been made for a packet sent to the
         * given destination transport address, set RTO to the protocol
         * parameter 'RTO.Initial'.
         */
        peer->last_rto = peer->rto = msecs_to_jiffies(sctp_rto_initial);
        peer->rtt = 0;
        peer->rttvar = 0;
        peer->srtt = 0;
        peer->rto_pending = 0;
        peer->hb_sent = 0;
        peer->fast_recovery = 0;

        peer->last_time_heard = jiffies;
        peer->last_time_used = jiffies;
        peer->last_time_ecne_reduced = jiffies;

        peer->init_sent_count = 0;

        peer->param_flags = SPP_HB_DISABLE |
                            SPP_PMTUD_ENABLE |
                            SPP_SACKDELAY_ENABLE;
        peer->hbinterval  = 0;

        /* Initialize the default path max_retrans.  */
        peer->pathmaxrxt  = sctp_max_retrans_path;
        peer->error_count = 0;

        INIT_LIST_HEAD(&peer->transmitted);
        INIT_LIST_HEAD(&peer->send_ready);
        INIT_LIST_HEAD(&peer->transports);

        peer->T3_rtx_timer.expires = 0;
        peer->hb_timer.expires = 0;

        setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event,
                        (unsigned long)peer);
        setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event,
                        (unsigned long)peer);

        /* Initialize the 64-bit random nonce sent with heartbeat. */
        get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));

        atomic_set(&peer->refcnt, 1);
        peer->dead = 0;

        peer->malloced = 0;

        /* Initialize the state information for SFR-CACC */
        peer->cacc.changeover_active = 0;
        peer->cacc.cycling_changeover = 0;
        peer->cacc.next_tsn_at_change = 0;
        peer->cacc.cacc_saw_newack = 0;

        return peer;
}
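
/* Note: the param_flags and hbinterval defaults set above are normally
 * overwritten from the association's and socket's settings once the
 * transport is attached to a peer (see sctp_assoc_add_peer() in
 * associola.c).
 */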

/* Allocate and initialize a new transport.  */
struct sctp_transport *sctp_transport_new(const union sctp_addr *addr,
                                          gfp_t gfp)
{
        struct sctp_transport *transport;

        transport = t_new(struct sctp_transport, gfp);
        if (!transport)
                goto fail;

        if (!sctp_transport_init(transport, addr, gfp))
                goto fail_init;

        transport->malloced = 1;
        SCTP_DBG_OBJCNT_INC(transport);

        return transport;

fail_init:
        kfree(transport);

fail:
        return NULL;
}
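
/* A minimal lifecycle sketch (hypothetical caller, not from this file):
 *
 *      struct sctp_transport *t = sctp_transport_new(&addr, GFP_ATOMIC);
 *      if (t) {
 *              sctp_transport_set_owner(t, asoc);
 *              ...
 *              sctp_transport_free(t);
 *      }
 *
 * A transport is created with a reference count of one; additional users
 * take references via sctp_transport_hold(), and the structure is finally
 * destroyed from sctp_transport_put() when the last reference is dropped.
 */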

/* This transport is no longer needed.  Free it up if possible, or
 * delay until the last reference is dropped.
 */
void sctp_transport_free(struct sctp_transport *transport)
{
        transport->dead = 1;

        /* Try to delete the heartbeat timer.  */
        if (del_timer(&transport->hb_timer))
                sctp_transport_put(transport);

        /* Delete the T3_rtx timer if it's active.
         * There is no point in not doing this now and letting the
         * structure hang around in memory since we know
         * the transport is going away.
         */
        if (timer_pending(&transport->T3_rtx_timer) &&
            del_timer(&transport->T3_rtx_timer))
                sctp_transport_put(transport);

        sctp_transport_put(transport);
}

/* Destroy the transport data structure.
 * Assumes there are no more users of this structure.
 */
static void sctp_transport_destroy(struct sctp_transport *transport)
{
        SCTP_ASSERT(transport->dead, "Transport is not dead", return);

        if (transport->asoc)
                sctp_association_put(transport->asoc);

        sctp_packet_free(&transport->packet);

        dst_release(transport->dst);
        kfree(transport);
        SCTP_DBG_OBJCNT_DEC(transport);
}

/* Start the T3_rtx timer if it is not already running, and update the
 * heartbeat timer.  This routine is called every time a DATA chunk is sent.
 */
void sctp_transport_reset_timers(struct sctp_transport *transport, int force)
{
        /* RFC 2960 6.3.2 Retransmission Timer Rules
         *
         * R1) Every time a DATA chunk is sent to any address (including a
         * retransmission), if the T3-rtx timer of that address is not
         * running, start it running so that it will expire after the RTO
         * of that address.
         */

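        /* Note: mod_timer() returns 0 when the timer was inactive, so a
         * transport reference is taken only when an inactive timer is
         * actually (re)armed; a timer that was already pending keeps the
         * reference it holds.
         */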
        if (force || !timer_pending(&transport->T3_rtx_timer))
                if (!mod_timer(&transport->T3_rtx_timer,
                               jiffies + transport->rto))
                        sctp_transport_hold(transport);

        /* When a data chunk is sent, reset the heartbeat interval.  */
        if (!mod_timer(&transport->hb_timer,
                       sctp_transport_timeout(transport)))
                sctp_transport_hold(transport);
}

/* This transport has been assigned to an association.
 * Initialize fields from the association or from the sock itself.
 * Register the reference count in the association.
 */
void sctp_transport_set_owner(struct sctp_transport *transport,
                              struct sctp_association *asoc)
{
        transport->asoc = asoc;
        sctp_association_hold(asoc);
}

/* Initialize the pmtu of a transport. */
void sctp_transport_pmtu(struct sctp_transport *transport)
{
        struct dst_entry *dst;

        dst = transport->af_specific->get_dst(NULL, &transport->ipaddr, NULL);

        if (dst) {
                transport->pathmtu = dst_mtu(dst);
                dst_release(dst);
        } else
                transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}

/* This is a complete rip-off from __sk_dst_check(); the cookie is
 * always 0 since this is how it's used in the PMTU code.
 */
static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
{
        struct dst_entry *dst = t->dst;

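        /* dst->obsolete is nonzero for cached routes that may have been
         * invalidated; ops->check() returns NULL once the entry is stale,
         * and dropping it here forces a fresh route lookup on the next
         * sctp_transport_route() call.
         */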
        if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) {
                dst_release(t->dst);
                t->dst = NULL;
                return NULL;
        }

        return dst;
}

void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
        struct dst_entry *dst;

        if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
                printk(KERN_WARNING "%s: Reported pmtu %d too low, "
                       "using default minimum of %d\n",
                       __func__, pmtu,
                       SCTP_DEFAULT_MINSEGMENT);
                /* Use default minimum segment size and disable
                 * pmtu discovery on this transport.
                 */
                t->pathmtu = SCTP_DEFAULT_MINSEGMENT;
        } else {
                t->pathmtu = pmtu;
        }

        dst = sctp_transport_dst_check(t);
        if (dst)
                dst->ops->update_pmtu(dst, pmtu);
}
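
/* Note: this is typically driven from ICMP handling, e.g. when a
 * "fragmentation needed" message reports a smaller path MTU (see
 * sctp_icmp_frag_needed() in input.c).
 */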

/* Caches the dst entry and source address for a transport's destination
 * address.
 */
void sctp_transport_route(struct sctp_transport *transport,
                          union sctp_addr *saddr, struct sctp_sock *opt)
{
        struct sctp_association *asoc = transport->asoc;
        struct sctp_af *af = transport->af_specific;
        union sctp_addr *daddr = &transport->ipaddr;
        struct dst_entry *dst;

        dst = af->get_dst(asoc, daddr, saddr);

        if (saddr)
                memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
        else
                af->get_saddr(opt, asoc, dst, daddr, &transport->saddr);

        transport->dst = dst;
        if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) {
                return;
        }
        if (dst) {
                transport->pathmtu = dst_mtu(dst);

                /* Initialize sk->sk_rcv_saddr, if the transport is the
                 * association's active path for getsockname().
                 */
                if (asoc && (transport == asoc->peer.active_path))
                        opt->pf->af->to_sk_saddr(&transport->saddr,
                                                 asoc->base.sk);
        } else
                transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}

/* Hold a reference to a transport.  */
void sctp_transport_hold(struct sctp_transport *transport)
{
        atomic_inc(&transport->refcnt);
}

/* Release a reference to a transport and clean up
 * if there are no more references.
 */
void sctp_transport_put(struct sctp_transport *transport)
{
        if (atomic_dec_and_test(&transport->refcnt))
                sctp_transport_destroy(transport);
}

/* Update transport's RTO based on the newly calculated RTT. */
void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
{
        /* Check for valid transport.  */
        SCTP_ASSERT(tp, "NULL transport", return);

        /* We should not be doing any RTO updates unless rto_pending is set.  */
        SCTP_ASSERT(tp->rto_pending, "rto_pending not set", return);

        if (tp->rttvar || tp->srtt) {
                /* 6.3.1 C3) When a new RTT measurement R' is made, set
                 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
                 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
                 */

                /* Note:  The above algorithm has been rewritten to
                 * express rto_beta and rto_alpha as inverse powers
                 * of two.
                 * For example, assuming the default value of RTO.Alpha of
                 * 1/8, rto_alpha would be expressed as 3.
                 */
                tp->rttvar = tp->rttvar - (tp->rttvar >> sctp_rto_beta)
                        + ((abs(tp->srtt - rtt)) >> sctp_rto_beta);
                tp->srtt = tp->srtt - (tp->srtt >> sctp_rto_alpha)
                        + (rtt >> sctp_rto_alpha);
        } else {
                /* 6.3.1 C2) When the first RTT measurement R is made, set
                 * SRTT <- R, RTTVAR <- R/2.
                 */
                tp->srtt = rtt;
                tp->rttvar = rtt >> 1;
        }

        /* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then
         * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY.
         */
        if (tp->rttvar == 0)
                tp->rttvar = SCTP_CLOCK_GRANULARITY;

        /* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */
        tp->rto = tp->srtt + (tp->rttvar << 2);
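
        /* Worked example (a sketch in jiffies, assuming the default
         * sctp_rto_alpha = 3 (RTO.Alpha = 1/8) and sctp_rto_beta = 2
         * (RTO.Beta = 1/4)), given SRTT = 160, RTTVAR = 40, R' = 96:
         *
         *   RTTVAR <- 40 - (40 >> 2) + (|160 - 96| >> 2) = 40 - 10 + 16 = 46
         *   SRTT   <- 160 - (160 >> 3) + (96 >> 3)       = 160 - 20 + 12 = 152
         *   RTO    <- 152 + (46 << 2) = 336
         *
         * The result is then clamped to [rto_min, rto_max] below.
         */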

        /* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min
         * seconds then it is rounded up to RTO.Min seconds.
         */
        if (tp->rto < tp->asoc->rto_min)
                tp->rto = tp->asoc->rto_min;

        /* 6.3.1 C7) A maximum value may be placed on RTO provided it is
         * at least RTO.max seconds.
         */
        if (tp->rto > tp->asoc->rto_max)
                tp->rto = tp->asoc->rto_max;

        tp->rtt = rtt;
        tp->last_rto = tp->rto;

        /* Reset rto_pending so that a new RTT measurement is started when a
         * new data chunk is sent.
         */
        tp->rto_pending = 0;

        SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d "
                          "rttvar: %d, rto: %ld\n", __func__,
                          tp, rtt, tp->srtt, tp->rttvar, tp->rto);
}

/* This routine updates the transport's cwnd and partial_bytes_acked
 * parameters based on the bytes acked in the received SACK.
 */
void sctp_transport_raise_cwnd(struct sctp_transport *transport,
                               __u32 sack_ctsn, __u32 bytes_acked)
{
        __u32 cwnd, ssthresh, flight_size, pba, pmtu;

        cwnd = transport->cwnd;
        flight_size = transport->flight_size;

        /* See if we need to exit Fast Recovery first */
        if (transport->fast_recovery &&
            TSN_lte(transport->fast_recovery_exit, sack_ctsn))
                transport->fast_recovery = 0;

        /* The appropriate cwnd increase algorithm is performed if, and only
         * if, the cumulative TSN would advance and the congestion window is
         * being fully utilized.
         */
        if (TSN_lte(sack_ctsn, transport->asoc->ctsn_ack_point) ||
            (flight_size < cwnd))
                return;

        ssthresh = transport->ssthresh;
        pba = transport->partial_bytes_acked;
        pmtu = transport->asoc->pathmtu;

        if (cwnd <= ssthresh) {
                /* RFC 4960 7.2.1
                 * o  When cwnd is less than or equal to ssthresh, an SCTP
                 *    endpoint MUST use the slow-start algorithm to increase
                 *    cwnd only if the current congestion window is being fully
                 *    utilized, an incoming SACK advances the Cumulative TSN
                 *    Ack Point, and the data sender is not in Fast Recovery.
                 *    Only when these three conditions are met can the cwnd be
                 *    increased; otherwise, the cwnd MUST not be increased.
                 *    If these conditions are met, then cwnd MUST be increased
                 *    by, at most, the lesser of 1) the total size of the
                 *    previously outstanding DATA chunk(s) acknowledged, and
                 *    2) the destination's path MTU.  This upper bound protects
                 *    against the ACK-Splitting attack outlined in [SAVAGE99].
                 */
                if (transport->fast_recovery)
                        return;

                if (bytes_acked > pmtu)
                        cwnd += pmtu;
                else
                        cwnd += bytes_acked;
                SCTP_DEBUG_PRINTK("%s: SLOW START: transport: %p, "
                                  "bytes_acked: %d, cwnd: %d, ssthresh: %d, "
                                  "flight_size: %d, pba: %d\n",
                                  __func__,
                                  transport, bytes_acked, cwnd,
                                  ssthresh, flight_size, pba);
        } else {
                /* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh,
                 * upon each SACK arrival that advances the Cumulative TSN Ack
                 * Point, increase partial_bytes_acked by the total number of
                 * bytes of all new chunks acknowledged in that SACK including
                 * chunks acknowledged by the new Cumulative TSN Ack and by
                 * Gap Ack Blocks.
                 *
                 * When partial_bytes_acked is equal to or greater than cwnd
                 * and before the arrival of the SACK the sender had cwnd or
                 * more bytes of data outstanding (i.e., before arrival of the
                 * SACK, flightsize was greater than or equal to cwnd),
                 * increase cwnd by MTU, and reset partial_bytes_acked to
                 * (partial_bytes_acked - cwnd).
                 */
                pba += bytes_acked;
                if (pba >= cwnd) {
                        cwnd += pmtu;
                        pba = ((cwnd < pba) ? (pba - cwnd) : 0);
                }
                SCTP_DEBUG_PRINTK("%s: CONGESTION AVOIDANCE: "
                                  "transport: %p, bytes_acked: %d, cwnd: %d, "
                                  "ssthresh: %d, flight_size: %d, pba: %d\n",
                                  __func__,
                                  transport, bytes_acked, cwnd,
                                  ssthresh, flight_size, pba);
        }

        transport->cwnd = cwnd;
        transport->partial_bytes_acked = pba;
}
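
/* Worked example (a sketch assuming pathmtu = 1500 bytes):
 *
 *   Slow start: cwnd = 3000 <= ssthresh and a SACK newly acks 4000 bytes
 *   with the window fully utilized; the increase is capped at one MTU,
 *   so cwnd becomes 3000 + 1500 = 4500.
 *
 *   Congestion avoidance: cwnd = 66000 > ssthresh; each SACK adds its
 *   newly acked bytes to partial_bytes_acked, and only once pba reaches
 *   cwnd is cwnd raised by one MTU (to 67500), with pba reduced by the
 *   new cwnd (or reset to 0).
 */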

/* This routine is used to lower the transport's cwnd when congestion is
 * detected.
 */
void sctp_transport_lower_cwnd(struct sctp_transport *transport,
                               sctp_lower_cwnd_t reason)
{
        switch (reason) {
        case SCTP_LOWER_CWND_T3_RTX:
                /* RFC 2960 Section 7.2.3, sctpimpguide
                 * When the T3-rtx timer expires on an address, SCTP should
                 * perform slow start by:
                 *      ssthresh = max(cwnd/2, 4*MTU)
                 *      cwnd = 1*MTU
                 *      partial_bytes_acked = 0
                 */
                transport->ssthresh = max(transport->cwnd/2,
                                          4*transport->asoc->pathmtu);
                transport->cwnd = transport->asoc->pathmtu;
                break;

        case SCTP_LOWER_CWND_FAST_RTX:
                /* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the
                 * destination address(es) to which the missing DATA chunks
                 * were last sent, according to the formula described in
                 * Section 7.2.3.
                 *
                 * RFC 2960 7.2.3, sctpimpguide: Upon detection of packet
                 * losses from SACK (see Section 7.2.4), an endpoint
                 * should do the following:
                 *      ssthresh = max(cwnd/2, 4*MTU)
                 *      cwnd = ssthresh
                 *      partial_bytes_acked = 0
                 */
                if (transport->fast_recovery)
                        return;

                /* Mark Fast Recovery */
                transport->fast_recovery = 1;
                transport->fast_recovery_exit = transport->asoc->next_tsn - 1;

                transport->ssthresh = max(transport->cwnd/2,
                                          4*transport->asoc->pathmtu);
                transport->cwnd = transport->ssthresh;
                break;

        case SCTP_LOWER_CWND_ECNE:
                /* RFC 2481 Section 6.1.2.
                 * If the sender receives an ECN-Echo ACK packet
                 * then the sender knows that congestion was encountered in the
                 * network on the path from the sender to the receiver. The
                 * indication of congestion should be treated just as a
                 * congestion loss in non-ECN Capable TCP. That is, the TCP
                 * source halves the congestion window "cwnd" and reduces the
                 * slow start threshold "ssthresh".
                 * A critical condition is that TCP does not react to
                 * congestion indications more than once every window of
                 * data (or more loosely more than once every round-trip time).
                 */
                if (time_after(jiffies, transport->last_time_ecne_reduced +
                                        transport->rtt)) {
                        transport->ssthresh = max(transport->cwnd/2,
                                                  4*transport->asoc->pathmtu);
                        transport->cwnd = transport->ssthresh;
                        transport->last_time_ecne_reduced = jiffies;
                }
                break;

        case SCTP_LOWER_CWND_INACTIVE:
                /* RFC 2960 Section 7.2.1, sctpimpguide
                 * When the endpoint does not transmit data on a given
                 * transport address, the cwnd of the transport address
                 * should be adjusted to max(cwnd/2, 4*MTU) per RTO.
                 * NOTE: Although the draft recommends that this check needs
                 * to be done every RTO interval, we do it every heartbeat
                 * interval.
                 */
                if (time_after(jiffies, transport->last_time_used +
                                        transport->rto))
                        transport->cwnd = max(transport->cwnd/2,
                                                 4*transport->asoc->pathmtu);
                break;
        }

        transport->partial_bytes_acked = 0;
        SCTP_DEBUG_PRINTK("%s: transport: %p reason: %d cwnd: "
                          "%d ssthresh: %d\n", __func__,
                          transport, reason,
                          transport->cwnd, transport->ssthresh);
}
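
/* Worked example (a sketch assuming pathmtu = 1500 and cwnd = 12000):
 *
 *   T3-rtx expiry:    ssthresh = max(6000, 6000) = 6000, cwnd = 1500
 *   Fast retransmit:  ssthresh = max(6000, 6000) = 6000, cwnd = 6000
 *
 * In every case partial_bytes_acked is cleared afterwards.
 */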

/* What is the next timeout value for this transport? */
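/* Note: sctp_jitter() adds a random offset of roughly -50% to +50% of the
 * RTO so that heartbeats on different transports do not synchronize.
 */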
unsigned long sctp_transport_timeout(struct sctp_transport *t)
{
        unsigned long timeout;

        timeout = t->rto + sctp_jitter(t->rto);
        if (t->state != SCTP_UNCONFIRMED)
                timeout += t->hbinterval;
        timeout += jiffies;
        return timeout;
}

/* Reset transport variables to their initial values */
void sctp_transport_reset(struct sctp_transport *t)
{
        struct sctp_association *asoc = t->asoc;

        /* RFC 2960 (bis), Section 5.2.4
         * All the congestion control parameters (e.g., cwnd, ssthresh)
         * related to this peer MUST be reset to their initial values
         * (see Section 6.2.1)
         */
        t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
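        /* E.g., with pathmtu = 1500 this gives
         * min(6000, max(3000, 4380)) = 4380 bytes, the RFC 4960 initial
         * cwnd.
         */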
        t->ssthresh = asoc->peer.i.a_rwnd;
        t->last_rto = t->rto = asoc->rto_initial;
        t->rtt = 0;
        t->srtt = 0;
        t->rttvar = 0;

        /* Reset these additional variables so that we have a clean
         * slate.
         */
        t->partial_bytes_acked = 0;
        t->flight_size = 0;
        t->error_count = 0;
        t->rto_pending = 0;
        t->hb_sent = 0;
        t->fast_recovery = 0;

        /* Initialize the state information for SFR-CACC */
        t->cacc.changeover_active = 0;
        t->cacc.cycling_changeover = 0;
        t->cacc.next_tsn_at_change = 0;
        t->cacc.cacc_saw_newack = 0;
}